
"""PyTorch MVP model."""

# NOTE(review): this file was recovered from a corrupted (decompiled/mangled) copy;
# verify against upstream `transformers/models/mvp/modeling_mvp.py`.

import copy
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
    Seq2SeqQuestionAnsweringModelOutput,
    Seq2SeqSequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_mvp import MvpConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "RUCAIBox/mvp"
_CONFIG_FOR_DOC = "MvpConfig"

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
    Shift input ids one token to the right.
    Nr   r   z1self.model.config.pad_token_id has to be defined.i)	new_zerosshapeclone
ValueErrormasked_fill_)r!   r"   r#   shifted_input_idss       `/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/models/mvp/modeling_mvp.pyshift_tokens_rightr-   <   s     "++IO<<(CRC06688aaae4aaadLMMM""#4#<lKKK    c                   L     e Zd ZdZdedef fdZd	dej        def fdZ xZ	S )
class MvpLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # Mvp is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately, hence the extra `offset` rows.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        """`input_ids' shape is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids.shape[:2]
        # Positions start after any cached (past) tokens so incremental decoding lines up.
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        ).expand(bsz, -1)

        return super().forward(positions + self.offset)
 fd
Zdej	        dedefdZ
	 	 	 	 	 	 ddej	        deej	                 deeej	                          deej	                 deej	                 deej	                 dedeej	        eej	                 eeej	                          f         fdZ xZS )MvpAttentionz=Multi-headed attention from 'Attention Is All You Need' paper        FT	embed_dim	num_headsdropout
is_decoderbiasc                    t                                                       || _        || _        || _        ||z  | _        | j        |z  | j        k    rt          d| j         d| d          | j        dz  | _        || _        t          j
        |||          | _        t          j
        |||          | _        t          j
        |||          | _        t          j
        |||          | _        d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      ࿩rW   )r7   r8   rS   rT   rU   head_dimr)   scalingrV   r   Lineark_projv_projq_projout_proj)r9   rS   rT   rU   rV   rW   r:   s         r,   r8   zMvpAttention.__init__f   s    	""!Y.MI%$.883dn 3 3%.3 3 3   }d*$i	94@@@i	94@@@i	94@@@	)YTBBBr.   tensorrF   rE   c                     |                     ||| j        | j                                      dd                                          S )Nr   r5   )viewrT   rZ   	transpose
contiguous)r9   ra   rF   rE   s       r,   _shapezMvpAttention._shape   s<    {{3GGQQRSUVWWbbdddr.   Nhidden_stateskey_value_statespast_key_valueattention_masklayer_head_maskattn_promptoutput_attentionsreturnc                    |du}|                                 \  }	}
}|                     |          | j        z  }|r||d         }|d         }n>|rU|                     |                     |          d|	          }|                     |                     |          d|	          }n||                     |                     |          d|	          }|                     |                     |          d|	          }t          j        |d         |gd          }t          j        |d         |gd          }nT|                     |                     |          d|	          }|                     |                     |          d|	          }| j        r||f}|t          j        |d         	                    |	ddd          |gd          }t          j        |d         	                    |	ddd          |gd          }|`t          j
        |	d|
|d                              d                                        |j                  }t          j        ||gd          }|	| j        z  d| j        f} |                     ||
|	          j        | } |j        | } |j        | }|                     d          }t          j        ||                    dd                    }|                                 |	| j        z  |
|fk    r2t%          d|	| j        z  |
|f d|                                            ||                                 |	d|
|fk    r+t%          d	|	d|
|f d|                                            |                    |	| j        |
|          |z   }|                    |	| j        z  |
|          }t&          j                            |d          }||                                 | j        fk    r-t%          d
| j        f d|                                            |                    dddd          |                    |	| j        |
|          z  }|                    |	| j        z  |
|          }|r=|                    |	| j        |
|          }|                    |	| j        z  |
|          }nd}t&          j                            || j        | j                  }t          j        ||          }|                                 |	| j        z  |
| j        fk    r5t%          d|	| j        |
| j        f d|                                            |                    |	| j        |
| j                  }|                    dd          }|                    |	|
| j                  }|                     |          }|||fS )z#Input shape: Batch x Time x ChannelNr   r   r%   r5   dimz$Attention weights should be of size z	, but is z!Attention mask should be of size z/Head mask for a single layer should be of size ptrainingz `attn_output` should be of size )sizer_   r[   rf   r]   r^   r?   catrV   rC   zerostor>   rT   rZ   rc   bmmrd   r)   r   
functionalsoftmaxrU   rt   reshaperS   r`   )r9   rg   rh   ri   rj   rk   rl   rm   is_cross_attentionrE   tgt_len_query_states
key_statesvalue_statesprompt_mask
proj_shapesrc_lenattn_weightsattn_weights_reshaped
attn_probsattn_outputs                         r,   rD   zMvpAttention.forward   s#    .T9',,..Wa {{=11DL@ 	L."<'*J)!,LL 	LT[[1A%B%BBLLJ;;t{{3C'D'Db#NNLL'T[[%?%?SIIJ;;t{{='A'A2sKKLN1$5z#BJJJJ 9nQ&7%FANNNLL T[[%?%?SIIJ;;t{{='A'A2sKKL? 	8 ),7N"KN$9$9#r2r$J$JJ#W]^___J 9k!n&;&;CR&L&Ll%[abcccL)#k#q';q>;N;Nq;Q;QRRUUVdVkll!&K+Hr!S!S!SDN*B>
Ct{{<#>>CZP$Z_j1
(|(*5//!$$yz/C/CAq/I/IJJ3#7'"JJJ*dn8LgW^7_ * * %%''* *  
 %""$$a'(BBB ta'8Rtt]k]p]p]r]rtt   (,,S$.'7SSVddL',,S4>-A7GTTL},,\r,BB&##%%$.)::: 1t~FW 1 1',,..1 1   +//2q!<<|?P?PQTVZVdfmov?w?wwL',,S4>-A7GTTL 	)
 %1$5$5c4>7T[$\$\!055cDN6JGU\]]LL$(!]**<4<RVR_*``
i
L99#"6!OOO)CRVR_3` ) )$$&&) )  
 "&&sDNGT]SS!++Aq11 "))#wGGmmK001>AAr.   )rR   FT)NNNNNF)rH   rI   rJ   rK   rL   floatboolr8   r?   rM   rf   r   r   rD   rN   rO   s   @r,   rQ   rQ   c   s       GG  C CC C 	C
 C C C C C C C6eU\ eC ec e e e e 488<1526.2"'wB wB|wB #5<0wB !u|!45	wB
 !.wB "%,/wB el+wB  wB 
u|Xel3XeEL>Q5RR	SwB wB wB wB wB wB wB wBr.   rQ   c                        e Zd Zdef fdZ	 ddej        dej        dej        dej        dee         d	e	ej        eej                 f         fd
class MvpEncoderLayer(nn.Module):
    def __init__(self, config: MvpConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = MvpAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        layer_head_mask: torch.FloatTensor,
        self_attn_prompt: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
                `(2, encoder_attention_heads, pro_len, head_dim)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Self-attention sub-block with residual connection (post-LN architecture).
        residual = hidden_states
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            attn_prompt=self_attn_prompt,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Feed-forward sub-block with residual connection.
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        # Guard against fp16 overflow: clamp inf/nan activations to a finite range.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
eej                 deej                 deej                 deej                 deeej                          dee	         dee	         deej
        eeej
        ej
        f                  f         fdZ xZS )MvpDecoderLayerr   c                    t                                                       |j        | _        t	          | j        |j        |j        d          | _        |j        | _        t          |j
                 | _        |j        | _        t          j        | j                  | _        t	          | j        |j        |j        d          | _        t          j        | j                  | _        t          j        | j        |j                  | _        t          j        |j        | j                  | _        t          j        | j                  | _        d S )NT)rS   rT   rU   rV   )rU   rV   )r7   r8   r   rS   rQ   decoder_attention_headsr   r   rU   r   r   r   r   r   r   r   encoder_attnencoder_attn_layer_normr\   decoder_ffn_dimr   r   r   r   s     r,   r8   zMvpDecoderLayer.__init__G  s   %n4,	
 
 
 ~#F$>?"(";$&L$@$@!(N*,	
 
 
 (*|DN'C'C$9T^V-CDD9V3T^DD "T^ < <r.   NFTrg   rj   encoder_hidden_statesencoder_attention_maskrk   cross_attn_layer_head_maskr   cross_attn_promptri   rm   	use_cachern   c           	      |   |}|	
|	dd         nd}|                      ||||||
          \  }}}t          j                            || j        | j                  }||z   }|                     |          }d}d}|{|}|	
|	dd         nd}|                     |||||||
          \  }}}t          j                            || j        | j                  }||z   }|                     |          }||z   }|}|                     | 	                    |                    }t          j                            || j
        | j                  }|                     |          }t          j                            || j        | j                  }||z   }|                     |          }|f}|
r|||fz  }|r||fz  }|S )aC  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
                `(2, decoder_attention_heads, pro_len, head_dim)`.
            cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape
                `(2, decoder_attention_heads, pro_len, head_dim)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Nr5   )rg   ri   rj   rk   rl   rm   rr   )rg   rh   rj   rk   rl   ri   rm   )r   r   rz   rU   rt   r   r   r   r   r   r   r   r   )r9   rg   rj   r   r   rk   r   r   r   ri   rm   r   r   self_attn_past_key_valueself_attn_weightspresent_key_valuecross_attn_present_key_valuecross_attn_weightscross_attn_past_key_valuer   s                       r,   rD   zMvpDecoderLayer.forwarda  s3   H ! :H9S>"1"#5#5Y] >Bnn'3)+(/ ?M ?
 ?
;(*; --mt|VZVc-dd =011-@@ (,$! ,$H @N?Yrss(;(;_c%NRN_N_+!65 :-8"3 O` O OKM-/K M11-4<Z^Zg1hhM$}4M 88GGM !24P P !**488M+B+BCC--mt?Vaean-oo//--mt|VZVc-dd =0--m<< " 	?)+=>>G 	,)++Gr.   )
NNNNNNNNFT)rH   rI   rJ   r   r8   r?   rM   r   r   r   r   rD   rN   rO   s   @r,   r   r   F  sq       =y = = = = = =: 268<9=26=A37488<,1$(_ _|_ !._  (5	_
 !) 6_ "%,/_ %-U\$:_ #5<0_ $EL1_ !u|!45_ $D>_ D>_ 
u (51BEDU1U+V"WW	X_ _ _ _ _ _ _ _r.   r   c                   X     e Zd ZdZdedededef fdZdej        dej        fd	Z	 xZ
S )
MvpClassificationHeadz-Head for sentence-level classification tasks.	input_dim	inner_dimnum_classespooler_dropoutc                     t                                                       t          j        ||          | _        t          j        |          | _        t          j        ||          | _        d S )Nrs   )r7   r8   r   r\   denseDropoutrU   r`   )r9   r   r   r   r   r:   s        r,   r8   zMvpClassificationHead.__init__  sY     	Yy)44
zN333	)[99r.   rg   rn   c                     |                      |          }|                     |          }t          j        |          }|                      |          }|                     |          }|S N)rU   r   r?   tanhr`   )r9   rg   s     r,   rD   zMvpClassificationHead.forward  s[    ]33

=11
=11]33m44r.   )rH   rI   rJ   rK   rL   r   r8   r?   rM   rD   rN   rO   s   @r,   r   r     s        77
:
: 
: 	
:
 
: 
: 
: 
: 
: 
class MvpPrompt(nn.Module):
    """Layer-wise prompt for encoder or decoder."""

    def __init__(self, config, num_layers, num_heads):
        super().__init__()
        self.prompt_length = config.prompt_length
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.head_dim = config.d_model // num_heads
        self.dropout = nn.Dropout(p=config.dropout)
        self.prompt_embedding = nn.Embedding(config.prompt_length, config.d_model)
        # Reparameterization MLP that maps each prompt embedding to per-layer key/value states.
        self.prompt_trans = nn.Sequential(
            nn.Linear(config.d_model, config.prompt_mid_dim),
            nn.GELU(),
            nn.Linear(config.prompt_mid_dim, num_layers * 2 * config.d_model),
        )

    def forward(self, prompt_ids: torch.Tensor) -> Tuple[torch.Tensor]:
        prompt = self.prompt_trans(self.prompt_embedding(prompt_ids))
        # (pro_len, num_layers * 2, num_heads, head_dim)
        prompt = prompt.view(self.prompt_length, self.num_layers * 2, self.num_heads, self.head_dim)
        prompt = self.dropout(prompt)
        # -> num_layers chunks of shape (2, num_heads, pro_len, head_dim): one (key, value) prompt per layer
        prompt = prompt.permute([1, 2, 0, 3]).split(2)
        return prompt
                  rS|j        j                            d|           |j        -|j        j        |j                 	                                 d S d S d S )NrR   )meanstd)r   init_std
isinstancer   r\   rB   datanormal_rW   zero_r   padding_idx)r9   moduler   s      r,   _init_weightsz MvpPreTrainedModel._init_weights  s    k"fbi(( 	?M&&CS&999{& &&((((( '&-- 	?M&&CS&999!-"6#56<<>>>>>	? 	?--r.   c                     | j         j        }t          j        g ddddd|gg| j                  }|                    |          |d}|S )N)r      
      r5   r   r       r5   r>   )rj   r!   )r   r"   r?   ra   r>   ne)r9   	pad_tokenr!   dummy_inputss       r,   r   zMvpPreTrainedModel.dummy_inputs  sa    K,	L"2"2"2Q2q)4L!MVZVabbb	'll955"
 
 r.   N)
rH   rI   rJ   r   config_classbase_model_prefixsupports_gradient_checkpointingr   propertyr    r.   r,   r   r     sO        L&*#	? 	? 	?   X  r.   r   aH  
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`MvpConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
a  
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
            is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
a  
    Example of summarization:

    Fine-tuning a model
    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, MvpForConditionalGeneration

    >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
    >>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp")

    >>> inputs = tokenizer(
    ...     "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.",
    ...     return_tensors="pt",
    ... )
    >>> labels = tokenizer("Bad Reasons To Quit Your Job", return_tensors="pt")["input_ids"]

    >>> loss = model(**inputs, labels=labels).loss
    >>> loss.backward()
    ```

    Inference after the model fine-tuned
    ```python
    >>> with torch.no_grad():
    ...     generated_ids = model.generate(**inputs)

    >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    ```
a_  
    Example of single-label classification:

    Fine-tuning a model on `num_labels` classes
    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, MvpForSequenceClassification

    >>> num_labels = 2  # for example, this is a binary classification task
    >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
    >>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels)

    >>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt")
    >>> labels = torch.tensor(1)  # the real label for inputs

    >>> loss = model(**inputs, labels=labels).loss
    >>> loss.backward()
    ```

    Inference after the model fine-tuned
    ```python
    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_id = logits.argmax()
    ```
a  
    Example:

    Fine-tuning a model for extrative question answering, and our model also supports generative question answering
    using `BartForConditionalGeneration`
    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, MvpForQuestionAnswering

    >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
    >>> model = MvpForQuestionAnswering.from_pretrained("RUCAIBox/mvp")

    >>> inputs = tokenizer(
    ...     "Answer the following question: Who was Jim Henson? [SEP] Jim Henson was a nice puppet",
    ...     return_tensors="pt",
    ... )
    >>> target_start_index = torch.tensor([18])
    >>> target_end_index = torch.tensor([19])

    >>> loss = model(**inputs, start_positions=target_start_index, end_positions=target_end_index).loss
    >>> loss.backward()
    ```

    Inference after the model fine-tuned
    ```python
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> answer_start_index = outputs.start_logits.argmax()
    >>> answer_end_index = outputs.end_logits.argmax()

    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    >>> predict_answer = tokenizer.decode(predict_answer_tokens)
    ```
c                       e Zd ZdZ	 ddedeej                 dee         f fdZ	d Z
d	 Z	 	 	 	 	 	 	 dd
ej        deej                 deej                 deej                 dee         dee         dee         deeef         fdZ xZS )
MvpEncodera  
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MvpEncoderLayer`].

    Args:
        config: MvpConfig
        embed_tokens (nn.Embedding): output embedding
        use_prompt (bool): whether to use prompt
    NFr   embed_tokens
use_promptc                    t                                                     j        | _        j        | _        j        }j        | _        j        | _	        j
        rt          j        |          nd| _        ||| _        n%t          j        j        || j                  | _        t%          j        |          | _        t          j        fdt+          j                  D                       | _        t          j        |          | _        || _        |r,j        | _        t9          j        j                  | _        d| _        |                                   d S )N      ?c                 .    g | ]}t                    S r   )r   .0r   r   s     r,   
<listcomp>z'MvpEncoder.__init__.<locals>.<listcomp>  !    $c$c$c_V%<%<$c$c$cr.   F)!r7   r8   rU   encoder_layerdrop	layerdropr   r"   r   max_position_embeddingsmax_source_positionsscale_embeddingmathsqrtembed_scaler   r   r   
vocab_sizer0   embed_positions
ModuleListrangeencoder_layerslayersr   layernorm_embeddingr   r   r   r   r   gradient_checkpointing	post_init)r9   r   r   r   rS   r:   s    `   r,   r8   zMvpEncoder.__init__  sU    	   ~1N	!.$*$B!393IR49Y///s# ,D "V->	4K[ \ \D<* 
  
 m$c$c$c$ceFLaFbFb$c$c$cdd#%<	#:#: $ 	!'!5D$-%.% %D! ',#r.   c                     | j         S r   r   r9   s    r,   get_input_embeddingszMvpEncoder.get_input_embeddings        r.   c                     || _         d S r   r  r9   values     r,   set_input_embeddingszMvpEncoder.set_input_embeddings      !r.   r!   rj   	head_maskinputs_embedsrm   output_hidden_statesreturn_dictrn   c           	      (   ||n| j         j        }||n| j         j        }||n| j         j        }||t	          d          |&|}|j        }	|                    d|	d                   }n=|,|                                dd         }	|dddddf         }nt	          d          ||                     |          | j	        z  }| 
                    |          }
||
z   }|                     |          }t          j                            || j        | j                  }| j        rFt#          j        | j                                      | j                  }|                     |          }|t/          ||j                  }|rdnd}|rdnd}|p|                                d         t3          | j                  k    r@t	          dt3          | j                   d	|                                d          d
          t7          | j                  D ]\  }}|r||fz   }d}| j        r!t#          j        g           }|| j        k     rd}|rd}nx| j        r@| j        r9|                     |j         |||||         nd| j        r||         nd|          }n) ||||||         nd| j        r||         nd|          }|d         }|r||d         fz   }|r||fz   }|stC          d |||fD                       S tE          |||          S )a~  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        NzDYou cannot specify both input_ids and inputs_embeds at the same timer%   z5You have to specify either input_ids or inputs_embedsrr   r   r   z&The head_mask should be specified for  layers, but it is for .FT)NN)rk   r   rm   r   c              3      K   | ]}||V  	d S r   r   r  vs     r,   	<genexpr>z%MvpEncoder.forward.<locals>.<genexpr>  s(      eeqWXWdWdWdWdWdeer.   last_hidden_staterg   
attentions)#r   rm   r!  use_return_dictr)   r'   rc   ru   r   r  r  r  r   rz   rU   rt   r   r?   r@   r   rx   r>   r   r   r=   lenr  	enumeraterandr  r  _gradient_checkpointing_func__call__tupler   )r9   r!   rj   r  r   rm   r!  r"  inputinput_shape	embed_posrg   r   r   encoder_statesall_attentionsidxencoder_layerto_dropdropout_probabilitylayer_outputss                        r,   rD   zMvpEncoder.forward  s   \ 2C1N--TXT_Tq$8$D  $+Jj 	 &1%<kk$+B]  ]%>cddd"E+K!r;r?;;II&',,..ss3K!!!!QQQ(+EETUUU  --i884;KKM((//	%	100??--mt|VZVc-dd ? 	Ad&899<<T[IIJ#44Z@@ %7H[\\N3=0:d  ~~"s4;'7'788 /S=M=M / /!((+/ / /  
 #,DK"8"8 "	F "	FC# C!/=2B!BG} #&+jnn#&77"G 1 ,. 4= $($E$E%.%&+4+@3d26/K)#..t)% %MM %2M%&;D;P3VZCG?*\*:3*?*?X\*;% % %M !.a 0  F!/=3C2E!E 	?+}.>>N 	fee]NN$Seeeeee+>Vd
 
 
 	
r.   NF)NNNNNNN)rH   rI   rJ   rK   r   r   r   r   r   r8   r  r  r?   
LongTensorrM   r   r   r   r   rD   rN   rO   s   @r,   r   r     sU         lq$ $$/7/E$ZbcgZh$ $ $ $ $ $L! ! !" " "
 '+15,059,0/3&*J
 J
#J
 !.J
 EL)	J

   12J
 $D>J
 'tnJ
 d^J
 
uo%	&J
 J
 J
 J
 J
 J
 J
 J
r.   r   c                       e Zd ZdZ	 ddedeej                 dee         f fdZ	d Z
d	 Z	 	 	 	 	 	 	 	 	 	 	 	 dd
ej        deej                 deej                 deej                 deej                 deej                 deeej                          deej                 dee         dee         dee         dee         deeef         fdZ xZS )
MvpDecoderz
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`]

    Args:
        config: MvpConfig
        embed_tokens (nn.Embedding): output embedding
        use_prompt (bool): whether to use prompt
    NFr   r   r   c                 *   t                                                     j        | _        j        | _        j        | _        j        | _        j	        rt          j        j                  nd| _        ||| _        n*t          j        j        j        | j                  | _        t%          j        j                  | _        t          j        fdt+          j                  D                       | _        t          j        j                  | _        || _        |rLj        | _        t9          j        j                  | _        t9          j        j                  | _        d| _         | !                                 d S )Nr   c                 .    g | ]}t                    S r   )r   r   s     r,   r  z'MvpDecoder.__init__.<locals>.<listcomp>  r  r.   F)"r7   r8   rU   decoder_layerdropr  r"   r   r  max_target_positionsr  r	  r
  r   r  r   r   r   r  r0   r  r  r  decoder_layersr  r   r  r   r   r   r   r   r   r  r  )r9   r   r   r   r:   s    `  r,   r8   zMvpDecoder.__init__  s{    	   ~1!.$*$B!8>8NW49V^444TW# ,D "V->PTP` a aD<*N 
  
 m$c$c$c$ceFLaFbFb$c$c$cdd#%<#?#? $ 	!'!5D$-%.% %D!
 &/%.& &D" ',#r.   c                     | j         S r   r  r  s    r,   r  zMvpDecoder.get_input_embeddings  r  r.   c                     || _         d S r   r  r  s     r,   r  zMvpDecoder.set_input_embeddings  r  r.   r!   rj   r   r   r  cross_attn_head_maskpast_key_valuesr   r   rm   r!  r"  rn   c                 p   |
|
n| j         j        }
||n| j         j        }|	|	n| j         j        }	||n| j         j        }||t          d          |&|}|j        }|                    d|d                   }n=|,|                                dd         }|dddddf         }nt          d          ||d         d         j        d         nd}|| 	                    |          | j
        z  }t          ||||          }||t          ||j        |d                   }|                     ||          }||z   }|                     |          }t           j                            || j        | j                  }| j        r[t+          j        | j                                      | j                  }|                     |          }|                     |          }| j        r%| j        r|	rt:                              d	           d
}	|rdnd}|
rdnd}|
r|dnd}|	rdnd}t?          ||gddg          D ]z\  }}|s|                                d         tA          | j!                  k    rCt          d| dtA          | j!                   d|                                d          d          {tE          | j!                  D ]%\  }}|r||fz  }| j        r t+          j#        g           }|| j$        k     r5|||         nd}| j        r_| j        rX| %                    |j&        |||||||         nd|||         nd| j        r||         nd| j        r||         ndd|
|	          }nH ||||||||         nd|||         nd| j        r||         nd| j        r||         nd||
|	          }|d         }|	r|||
rdnd         fz  }|
r||d         fz  }|||d         fz  }'|r||fz  }|	r|nd} |stO          d || |||fD                       S tQ          || |||          S )a  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        NzTYou cannot specify both decoder_input_ids and decoder_inputs_embeds at the same timer%   zEYou have to specify either decoder_input_ids or decoder_inputs_embedsr   r5   )r~   rr   zZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fr   r  rI  zThe `z` should be specified for r$  r%  )
rj   r   r   rk   r   r   r   ri   rm   r   r   r   c              3      K   | ]}||V  	d S r   r   r'  s     r,   r)  z%MvpDecoder.forward.<locals>.<genexpr>  s0        =  === r.   )r+  rJ  rg   r,  cross_attentions))r   rm   r!  r   r-  r)   r'   rc   ru   r   r  r   r   r=   r  r  r   rz   rU   rt   r   r?   r@   r   rx   r>   r   r   r  loggerwarning_oncezipr.  r  r/  r0  r  r1  r2  r3  r   )!r9   r!   rj   r   r   r  rI  rJ  r   r   rm   r!  r"  r4  r5  r;   rG   rg   r   r   r   all_hidden_statesall_self_attnsall_cross_attentionsnext_decoder_cache	attn_mask	mask_namer9  decoder_layerr<  ri   r=  
next_caches!                                    r,   rD   zMvpDecoder.forward  s   ` 2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B]  ]%>sttt"E#/K!r;r?;;II&',,..ss3K!!!!QQQ(+EEdeee DSC^!3A!6!<Q!?!?de  --i884;KKM:K8N
 

 !,1G1S%?&(;[QS_& & &"
 ((0FGG	%	100??--mt|VZVc-dd ? 	Cd&899<<T[IIJ#44Z@@ $ 6 6z B B& 	"4= 	" "##p   "	 #7@BBD0:d&7h<Q<]rrdh#,6RR$ %(4H(IKYoKp$q$q 	 	 Iy$>>##A&3t{+;+;<<$3	 3 3SEUEU 3 3%NN,,Q/3 3 3  
 #,DK"8"8 3	@ 3	@C# 6!m%55!} &+jnn#&775D5P_S11VZN* t}  $ A A!*!")*&/&;IcNN1E1Q(--W[-1_F$S))$.2oG%c**4%! ! !.!#1*?+A7@7LYs^^RV5I5U,S11[_?C&X&6s&;&;TXAE'Z'8'='=VZ#1&7'! ! ! *!,M V"}:K5RQQQR'S&UU"  @=#3"55(4(]1-=,??(   	2-!11+4>''$
 	  '5FXlm     
 9+&+%1
 
 
 	
r.   r>  )NNNNNNNNNNNN)rH   rI   rJ   rK   r   r   r   r   r   r8   r  r  r?   r?  rM   r   r   r   r   r   rD   rN   rO   s   @r,   rA  rA    s         lq& &&/7/E&ZbcgZh& & & & & &P! ! !" " "
 '+15=A=A,07;=A59$(,0/3&*_
 _
#_
 !._
  ((9:	_

 !))9 :_
 EL)_
 'u|4_
 "$u'8"9:_
   12_
 D>_
 $D>_
 'tn_
 d^_
 
u??	@_
 _
 _
 _
 _
 _
 _
 _
r.   rA  zQThe bare MVP Model outputting raw hidden-states without any specific head on top.c            %       B    e Zd ZdgZddgZdef fdZd Zd Zd Z	d	 Z
d
 Z ee           eeeee          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddej        deej                 deej                 deej                 deej                 deej                 deej                 deeej                          deeej                          deej                 deej                 dee         dee         dee         dee         deeef         f d                        Z xZS )MvpModelfinal_logits_biasencoder.embed_tokens.weightdecoder.embed_tokens.weightr   c                 f   t                                          |           |j        |j        }}|j        | _        t          j        ||j        |          | _        t          || j        |j                  | _
        t          || j        |j                  | _        |                                  d S r   )r7   r8   r"   r  r   r   r   r   sharedr   encoderrA  decoderr  )r9   r   r   r  r:   s       r,   r8   zMvpModel.__init__  s       "("5v7HZ +l:v~{KK!&$+v7HII!&$+v7HII 	r.   c                     | j         S r   )r_  r  s    r,   r  zMvpModel.get_input_embeddings  s
    {r.   c                 X    || _         | j         | j        _        | j         | j        _        d S r   )r_  r`  r   ra  r  s     r,   r  zMvpModel.set_input_embeddings  s'    $(K!$(K!!!r.   c                     | j         S r   )r`  r  s    r,   get_encoderzMvpModel.get_encoder  
    |r.   c                     | j         S r   ra  r  s    r,   get_decoderzMvpModel.get_decoder  rf  r.   c                    | j         s
J d            |                     d           | j        j                            d           | j        j                            d           | j        j                            d           d S )NzHIf you want to use lightweight tuning, make sure that `use_prompt=True`.FT)r   requires_grad_r`  r   ra  r   r  s    r,   set_lightweight_tuningzMvpModel.set_lightweight_tuning  s~    jj jjjjE"""%44T:::%44T:::&55d;;;;;r.   )
checkpointoutput_typer   expected_outputNr!   rj   decoder_input_idsdecoder_attention_maskr  decoder_head_maskrI  encoder_outputsrJ  r   decoder_inputs_embedsr   rm   r!  r"  rn   c                    |8|6|t          d          t          || j        j        | j        j                  }||n| j        j        }||n| j        j        }||n| j        j        }||n| j        j        }|| 	                    ||||
|||          }ne|rct          |t                    sNt          |d         t          |          dk    r|d         nd t          |          dk    r|d         nd           }|                     |||d         ||||	|||||          }|s||z   S t          |j        |j        |j        |j        |j        |j        |j        |j                  S )	NzIf no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.)r!   rj   r  r   rm   r!  r"  r   r   r5   r*  r!   rj   r   r   r  rI  rJ  r   r   rm   r!  r"  )r+  rJ  decoder_hidden_statesdecoder_attentionsrM  encoder_last_hidden_stater   encoder_attentions)r)   r-   r   r"   r#   rm   r!  r   r-  r`  r   r   r.  ra  r   r+  rJ  rg   r,  rM  )r9   r!   rj   rp  rq  r  rr  rI  rs  rJ  r   rt  r   rm   r!  r"  decoder_outputss                    r,   rD   zMvpModel.forward  s   6 $)>)F  U   !34;3T[5W! ! 2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B]""ll#-#+"3%9' +  OO  	O_!M!M 	-"1!"4474H4H14L4Loa00RV14_1E1E1I1I?1--t  O ,,'1"1!"4#1'!5+//!5# ' 
 
  	5"_44!-?+;"1"?.9,=&5&G"1"?.9	
 	
 	
 		
r.   NNNNNNNNNNNNNNN)rH   rI   rJ   "_keys_to_ignore_on_load_unexpected_tied_weights_keysr   r8   r  r  re  ri  rl  r   MVP_INPUTS_DOCSTRINGr   _CHECKPOINT_FOR_DOCr   _CONFIG_FOR_DOC_EXPECTED_OUTPUT_SHAPEr?   r?  r   rM   r   r   r   r   r   rD   rN   rO   s   @r,   rZ  rZ    s^       
 +>)>&79VWy        0 0 0
    < < < +*+?@@&&$.	   '+158<=A,0487;=A=A59=A$(,0/3&*!U
 U
#U
 !.U
 $E$45	U

 !))9 :U
 EL)U
 $EL1U
 'u|4U
 "$u'8"9:U
 "$u'8"9:U
   12U
  ((9:U
 D>U
 $D>U
 'tnU
  d^!U
" 
u((	)#U
 U
 U
  A@U
 U
 U
 U
 U
r.   rZ  z[The MVP Model with a language modeling head. Can be used for various text generation tasks.c            (           e Zd Zg dZdef fdZd Zd Zd#dede	e         d	e
j        f fd
Zded	dfdZd Zd Zd Z ee           eee           ee          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d$dej        de	ej                 de	ej                 de	ej                 de	ej                 de	ej                 de	ej                 de	eej                          de	eej                          de	ej                 de	ej                 de	ej                 de	e         de	e         de	e         de	e         d	eeef         f"d                                     Z dej        fd!Z!e"d"             Z# xZ$S )%MvpForConditionalGeneration)r\  r]  lm_head.weightr   c                 l   t                                          |           t          |          | _        |                     dt          j        d| j        j        j        f                     t          j
        |j        | j        j        j        d          | _        |                                  d S )Nr[  r   FrY   )r7   r8   rZ  r   register_bufferr?   rw   r_  r1   r   r\   r   lm_headr  r   s     r,   r8   z$MvpForConditionalGeneration.__init__N  s       f%%
0%+q$*BSBb>c2d2deeey1B1QX]^^^ 	r.   c                 4    | j                                         S r   )r   re  r  s    r,   re  z'MvpForConditionalGeneration.get_encoderW      z%%'''r.   c                 4    | j                                         S r   )r   ri  r  s    r,   ri  z'MvpForConditionalGeneration.get_decoderZ  r  r.   Nnew_num_tokenspad_to_multiple_ofrn   c                 v    t                                          ||          }|                     |           |S r   )r7   resize_token_embeddings_resize_final_logits_bias)r9   r  r  new_embeddingsr:   s       r,   r  z3MvpForConditionalGeneration.resize_token_embeddings]  s7    88I[\\&&~666r.   c                    | j         j        d         }||k    r| j         d d d |f         }nBt          j        d||z
  f| j         j                  }t          j        | j         |gd          }|                     d|           d S )Nr%   r   r   rp   r[  )r[  r'   r?   rw   r>   rv   r  )r9   r  old_num_tokensnew_bias
extra_biass        r,   r  z5MvpForConditionalGeneration._resize_final_logits_biasb  s    /5b9^++-aaa..@AHHa.)H%IRVRhRopppJy$"8*!E1MMMH0(;;;;;r.   c                     | j         S r   r  r  s    r,   get_output_embeddingsz1MvpForConditionalGeneration.get_output_embeddingsk  rf  r.   c                     || _         d S r   r  r9   r  s     r,   set_output_embeddingsz1MvpForConditionalGeneration.set_output_embeddingsn      %r.   c                 l    | j                                          | j                            d           d S r>  r   rl  r  rk  r  s    r,   rl  z2MvpForConditionalGeneration.set_lightweight_tuningq  2    
))+++##E*****r.   rn  r   r!   rj   rp  rq  r  rr  rI  rs  rJ  r   rt  labelsr   rm   r!  r"  c                    ||n| j         j        }|G|rt                              d           d}|'|%t	          || j         j        | j         j                  }|                     |||||||||	|
|||||          }|                     |d                   | j	        z   }d}|Kt                      } ||                    d| j         j                  |                    d                    }|s|f|dd         z   }||f|z   n|S t          |||j        |j        |j        |j        |j        |j        |j        	  	        S )	a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
        NzJThe `use_cache` argument is changed to `False` since `labels` is provided.F)rj   rp  rs  rq  r  rr  rI  rJ  r   rt  r   rm   r!  r"  r   r%   r   	losslogitsrJ  rw  rx  rM  ry  r   rz  )r   r-  rN  warningr-   r"   r#   r   r  r[  r	   rc   r  r   rJ  rw  rx  rM  ry  r   rz  )r9   r!   rj   rp  rq  r  rr  rI  rs  rJ  r   rt  r  r   rm   r!  r"  r   	lm_logitsmasked_lm_lossloss_fctoutputs                         r,   rD   z#MvpForConditionalGeneration.forwardu  s   < &1%<kk$+B] mklllI (-B-J$6DK4dk6X% %! **)/+#9/!5+'"7/!5#  
 
" LL,,t/EE	'))H%XinnR9O&P&PRXR]R]^`RaRabbN 	Z\GABBK/F3A3M^%..SYY#3")"?&9$5&-&G")"?&9

 

 

 
	
r.   c                 L    t          || j        j        | j        j                  S r   )r-   r   r"   r#   )r9   r  s     r,   %prepare_decoder_input_ids_from_labelszAMvpForConditionalGeneration.prepare_decoder_input_ids_from_labels  s    !&$+*BDKDfgggr.   c                 z    d}| D ]4}|t          fd|d d         D                       |dd          z   fz  }5|S )Nr   c              3   t   K   | ]2}|                     d                     |j                            V  3dS r   Nindex_selectrx   r>   r  
past_statebeam_idxs     r,   r)  z=MvpForConditionalGeneration._reorder_cache.<locals>.<genexpr>  sC      rrU_j--aZ=N1O1OPPrrrrrrr.   r5   r3  rJ  r  reordered_past
layer_pasts    `  r,   _reorder_cachez*MvpForConditionalGeneration._reorder_cache  sm    ) 	 	JrrrrcmnpopnpcqrrrrrQRR.! NN r.   r   NNNNNNNNNNNNNNNN)%rH   rI   rJ   r~  r   r8   re  ri  rL   r   r   r   r  r  r  r  rl  r   r  r   r   r  r   "MVP_CONDITIONAL_GENERATION_EXAMPLEr?   r?  rM   r   r   r   r   r   rD   r  staticmethodr  rN   rO   s   @r,   r  r  H  s        jiiy      ( ( (( ( ( c xX[} hjht      
< < < < < <  & & &+ + + +*+?@@?YYY:;; '+158<=A,0487;=A=A59=A-1$(,0/3&*#L
 L
#L
 !.L
 $E$45	L

 !))9 :L
 EL)L
 $EL1L
 'u|4L
 "$u'8"9:L
 "$u'8"9:L
   12L
  ((9:L
 )*L
 D>L
 $D>L
  'tn!L
" d^#L
$ 
uo%	&%L
 L
 L
 <; ZY A@L
\hEL h h h h   \    r.   r  z
    Mvp model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
    tasks.
    c            %           e Zd ZddgZdef fdZd Z ee           e	e
          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddej        deej                 d	eej                 d
eej                 deej                 deej                 deej                 deeej                          deej                 deej                 deej                 dee         dee         dee         dee         deeef         f d                        Z xZS )MvpForSequenceClassificationr\  r]  r   c                      t                      j        |fi | t          |          | _        t	          |j        |j        |j        |j                  | _        | 	                                 d S r   )
r7   r8   rZ  r   r   r   
num_labelsclassifier_dropoutclassification_headr  )r9   r   kwargsr:   s      r,   r8   z%MvpForSequenceClassification.__init__  sq    **6***f%%
#8NN%	$
 $
  	r.   c                 l    | j                                          | j                            d           d S r>  )r   rl  r  rk  r  s    r,   rl  z3MvpForSequenceClassification.set_lightweight_tuning  s3    
))+++ //66666r.   Nr!   rj   rp  rq  r  rr  rI  rs  r   rt  r  r   rm   r!  r"  rn   c                    ||n| j         j        }|d}||	t          d| j        j                   |                     |||||||||	|
||||          }|d         }|                    | j         j                                      |j	                  }t          t          j        |                    d                              dk    rt          d          ||ddf                             |                    d          d|                    d                    dddddf         }|                     |          }d}|n| j         j        p| j         j        dk    rd	| j         _        nS| j         j        dk    r7|j        t          j        k    s|j        t          j        k    rd
| j         _        nd| j         _        | j         j        d	k    r\t/                      }| j         j        dk    r1 ||                                |                                          }n |||          }n| j         j        d
k    rLt3                      } ||                    d| j         j                  |                    d                    }n*| j         j        dk    rt5                      } |||          }|s|f|dd         z   }||f|z   n|S t7          |||j        |j        |j        |j        |j         |j!        |j"        	  	        S )a3  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        NFz8Passing input embeddings is currently not supported for rj   rp  rq  r  rr  rI  rs  r   rt  r   rm   r!  r"  r   r   z7All examples must have the same number of <eos> tokens.r%   
regressionsingle_label_classificationmulti_label_classificationr  )#r   r-  NotImplementedErrorr:   rH   r   eqeos_token_idrx   r>   r.  r?   unique_consecutivesumr)   rc   ru   r  problem_typer  r=   rA   rL   r
   squeezer	   r   r   rJ  rw  rx  rM  ry  r   rz  )r9   r!   rj   rp  rq  r  rr  rI  rs  r   rt  r  r   rm   r!  r"  r   rg   eos_masksentence_representationr  r  r  r  s                           r,   rD   z$MvpForSequenceClassification.forward  s?   2 &1%<kk$+B]I!:%d4>Kbdd   **)/#9/!5+'"7/!5#  
 
   
<< 899<<]=QRRu'Q8899A==VWWW"/!!!"<"A"A-BTBTUVBWBWY[]j]o]opr]s]s"t"tAAr111H#
 ))*ABB{'/;)Q../;DK,,[+a//V\UZ5O5OSYS_chclSlSl/LDK,,/KDK,{'<77"99;)Q..#8FNN$4$4fnn6F6FGGDD#8FF33DD)-JJJ+--xB0F G GUWYY)-III,..x// 	FY,F)-)9TGf$$vE.#3")"?&9$5&-&G")"?&9

 

 

 
	
r.   r|  )rH   rI   rJ   r~  r   r8   rl  r   r  r   "MVP_SEQUENCE_CLASSIFICATION_SAMPLEr?   r?  r   rM   r   r   r   r   r   r   rD   rN   rO   s   @r,   r  r    s         89VWy      7 7 7 +*+?@@:;; '+158<=A,0487;=A59=A-1$(,0/3&*!_
 _
#_
 !._
 $E$45	_

 !))9 :_
 EL)_
 $EL1_
 'u|4_
 "$u'8"9:_
   12_
  ((9:_
 )*_
 D>_
 $D>_
 'tn_
  d^!_
" 
u55	6#_
 _
 _
 <; A@_
 _
 _
 _
 _
r.   r  z
    MVP Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer
    on top of the hidden-states output to compute `span start logits` and `span end logits`).
    c            '       &    e Zd ZddgZ fdZd Z ee           ee	          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dde
j        dee
j                 dee
j                 d	ee
j                 d
ee
j                 dee
j                 dee
j                 deee
j                          dee
j                 dee
j                 dee
j                 dee
j                 dee         dee         dee         dee         deeef         f"d                        Z xZS )MvpForQuestionAnsweringr\  r]  c                    t                                          |           d|_        |j        | _        t          |          | _        t          j        |j        |j                  | _        | 	                                 d S r4   )
r7   r8   r  rZ  r   r   r\   hidden_size
qa_outputsr  r   s     r,   r8   z MvpForQuestionAnswering.__init__^  sm        +f%%
)F$68IJJ 	r.   c                 l    | j                                          | j                            d           d S r>  )r   rl  r  rk  r  s    r,   rl  z.MvpForQuestionAnswering.set_lightweight_tuningj  s2    
))+++&&u-----r.   Nr!   rj   rp  rq  r  rr  rI  rs  start_positionsend_positionsr   rt  r   rm   r!  r"  rn   c                    ||n| j         j        }|	|
d}|                     ||||||||||||||          }|d         }|                     |          }|                    dd          \  }}|                    d                                          }|                    d                                          }d}|	|
t          |	                                          dk    r|	                    d          }	t          |
                                          dk    r|
                    d          }
|                    d          }|		                    d|          }	|
	                    d|          }
t          |          } |||	          } |||
          }||z   d	z  }|s||f|dd         z   }||f|z   n|S t          ||||j        |j        |j        |j        |j        |j        |j        

  
        S )a  
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        NFr  r   r   r%   rp   )ignore_indexr5   )
r  start_logits
end_logitsrJ  rw  rx  rM  ry  r   rz  )r   r-  r   r  r   r  re   r.  ru   r   r	   r   rJ  rw  rx  rM  ry  r   rz  )r9   r!   rj   rp  rq  r  rr  rI  rs  r  r  r   rt  r   rm   r!  r"  r   sequence_outputr  r  r  
total_lossignored_indexr  
start_lossend_lossr  s                               r,   rD   zMvpForQuestionAnswering.forwardn  sZ   > &1%<kk$+B]&=+DI**)/#9/!5+'"7/!5#  
 
" "!*11#)<<r<#:#: j#++B//::<<''++6688

&=+D?''))**Q.."1"9"9""="==%%''((1,, - 5 5b 9 9(--a00M-33A}EEO)//=AAM']CCCH!,@@Jx
M::H$x/14J 	R F 0:/EZMF**6Q2%!#3")"?&9$5&-&G")"?&9
 
 
 	
r.   r  )rH   rI   rJ   r~  r8   rl  r   r  r   MVP_QUESTION_ANSWERING_SAMPLEr?   rM   r   r?  r   r   r   r   r   r   rD   rN   rO   s   @r,   r  r  T  s	        89VW
 
 
 
 
. . . +*+?@@566 #'158<=A,0487;=A6:4859=A$(,0/3&*#\
 \
<\
 !.\
 $E$45	\

 !))9 :\
 EL)\
 $EL1\
 'u|4\
 "$u'8"9:\
 "%"23\
   01\
   12\
  ((9:\
 D>\
 $D>\
  'tn!\
" d^#\
$ 
u99	:%\
 \
 \
 76 A@\
 \
 \
 \
 \
r.   r  c                   (     e Zd ZdZ fdZd Z xZS )MvpDecoderWrapperz
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    c                 r    t                                          |           t          |          | _        d S r   )r7   r8   rA  ra  r   s     r,   r8   zMvpDecoderWrapper.__init__  s.       !&))r.   c                      | j         |i |S r   rh  )r9   argsr  s      r,   rD   zMvpDecoderWrapper.forward  s    t|T,V,,,r.   )rH   rI   rJ   rK   r8   rD   rN   rO   s   @r,   r  r    sQ         
* * * * *- - - - - - -r.   r  c                        e Zd ZdgZ fdZd Zd Zd Zd Zd Z	d Z
d	 Z eee
          	 	 	 	 	 	 	 	 	 	 	 	 	 ddej        deej                 deej                 deej                 deej                 deej                 deeej                          deej                 deej                 dee         dee         dee         dee         deeef         fd            Zed             Z xZS )MvpForCausalLMr  c                 *   t          j        |          }d|_        d|_        t	                                          |           t          |          | _        t          j	        |j
        |j        d          | _        |                                  d S )NTFrY   )copydeepcopyrV   is_encoder_decoderr7   r8   r  r   r   r\   r  r  r  r  r   s     r,   r8   zMvpForCausalLM.__init__  s    v&& $)!   &v..
y!3V5FUSSS 	r.   c                 $    | j         j        j        S r   r   ra  r   r  s    r,   r  z#MvpForCausalLM.get_input_embeddings  s    z!..r.   c                 (    || j         j        _        d S r   r  r  s     r,   r  z#MvpForCausalLM.set_input_embeddings  s    */
'''r.   c                     | j         S r   r  r  s    r,   r  z$MvpForCausalLM.get_output_embeddings  rf  r.   c                     || _         d S r   r  r  s     r,   r  z$MvpForCausalLM.set_output_embeddings  r  r.   c                     || j         _        d S r   r   ra  )r9   ra  s     r,   set_decoderzMvpForCausalLM.set_decoder  s    $
r.   c                     | j         j        S r   r  r  s    r,   ri  zMvpForCausalLM.get_decoder  s    z!!r.   c                 l    | j                                          | j                            d           d S r>  r  r  s    r,   rl  z%MvpForCausalLM.set_lightweight_tuning  r  r.   r  Nr!   rj   r   r   r  rI  rJ  r   r  r   rm   r!  r"  rn   c                    ||n| j         j        }||n| j         j        }||n| j         j        }| j                            |||||||||
|||          }|                     |d                   }d}|	Kt                      } ||                    d| j         j	                  |	                    d                    }|s|f|dd         z   }||f|z   n|S t          |||j        |j        |j        |j                  S )a  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MvpForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
        >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 8, 50267]
        ```Nrv  r   r%   r   )r  r  rJ  rg   r,  rM  )r   rm   r!  r-  r   ra  r  r	   rc   r  r   rJ  rg   r,  rM  )r9   r!   rj   r   r   r  rI  rJ  r   r  r   rm   r!  r"  r   r  r  r  r  s                      r,   rD   zMvpForCausalLM.forward  sR   J 2C1N--TXT_Tq$8$D  $+Jj 	 &1%<kk$+B] *$$)"7#9!5+'/!5# % 
 
 gaj))'))H8FKKDK,BCCV[[QS__UUD 	DY,F'+'7D7V##VC0#3!/)$5
 
 
 	
r.   c                 T    d}| D ]!}|t          fd|D                       fz  }"|S )Nr   c              3   t   K   | ]2}|                     d                     |j                            V  3dS r  r  r  s     r,   r)  z0MvpForCausalLM._reorder_cache.<locals>.<genexpr>  sC      nnU_j--aZ=N1O1OPPnnnnnnr.   r  r  s    `  r,   r  zMvpForCausalLM._reorder_cache  sQ    ) 	 	Jnnnncmnnnnn NN r.   )NNNNNNNNNNNNN)rH   rI   rJ   r~  r8   r  r  r  r  r  ri  rl  r   r   r  r?   r?  r   rM   r   r   r   r   r   rD   r  r  rN   rO   s   @r,   r  r    s%       *+
 
 
 
 
/ / /0 0 0  & & &% % %" " "+ + + +L[jkkk '+15=A>B,07;=A59-1$(,0/3&*L
 L
#L
 !.L
  ((9:	L

 !)): ;L
 EL)L
 'u|4L
 "$u'8"9:L
   12L
 )*L
 D>L
 $D>L
 'tnL
 d^L
 
u77	8L
 L
 L
 lkL
\   \    r.   r  )HrK   r  r	  typingr   r   r   r   r?   torch.utils.checkpointr   torch.nnr   r	   r
   activationsr   
generationr   modeling_attn_mask_utilsr   r   modeling_outputsr   r   r   r   r   r   r   modeling_utilsr   utilsr   r   r   r   r   r   configuration_mvpr   
get_loggerrH   rN  r  r  r  rM   rL   r-   r   r0   ModulerQ   r   r   r   r   r   MVP_START_DOCSTRINGr  r  r  r  r   rA  rZ  r  r  r  r  r  r   r.   r,   <module>r     s       / / / / / / / / / / / /            A A A A A A A A A A ! ! ! ! ! ! ) ) ) ) ) ) e e e e e e e e                  . - - - - -                ) ( ( ( ( ( 
	H	%	%$  & %, c [^    "8 8 8 8 8BL 8 8 8,XB XB XB XB XB29 XB XB XBvE E E E Ebi E E EPz z z z zbi z z z|    BI   0    	   2       6  _ B& "<& "8"! JA
 A
 A
 A
 A
# A
 A
 A
HW
 W
 W
 W
 W
# W
 W
 W
t W C
 C
 C
 C
 C
! C
 C
	 C
L acv G G G G G"4o G G GT   u
 u
 u
 u
 u
#5 u
 u
 u
p   q
 q
 q
 q
 q
0 q
 q
 q
j- - - - -* - - -{ { { { {' { { { { {r.   