
"""PyTorch OpenAI GPT-2 model."""

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import (
    _prepare_4d_attention_mask_for_sdpa,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    get_torch_version,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
GPT2Config)_flash_attention_forwardzopenai-community/gpt2r'   c                    	 ddl }ddl}n)# t          $ r t                              d            w xY wt
          j                            |          }t                              d|            |j	        
                    |          }g }g }|D ]|\  }	}
t                              d|	 d|
            |j	                            ||	          }|                    |	           |                    |                                           }t          ||          D ]\  }	}|	dd         }	|	                    d          }	| }|	D ]}|                    d	|          r|                    d
|          }n|g}|d         dk    s|d         dk    rt#          |d          }nr|d         dk    rt#          |d          }nU|d         dk    s|d         dk    r't#          ||d                   }t#          |d          }nt#          ||d                   }t%          |          dk    rt'          |d                   }||         }	 |j        |j        k    r t+          d|j         d|j         d          n/# t*          $ r"}|xj        |j        |j        fz  c_         d}~ww xY wt                              d|	            t/          j        |          |_        | S )z&Load tf checkpoints in a pytorch modelr   NzLoading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.z&Converting TensorFlow checkpoint from zLoading TF weight z with shape    /z[A-Za-z]+\d+z(\d+)wgweightbbiaswpewte   r&   zPointer shape z and array shape z mismatchedzInitialize PyTorch weight )re
tensorflowImportErrorloggererrorospathabspathinfotrainlist_variablesload_variableappendsqueezezipsplit	fullmatchgetattrlenintshape
ValueErrorargstorch
from_numpydata)modelconfiggpt2_checkpoint_pathr4   tftf_path	init_varsnamesarraysnamerH   arraypointerm_namescope_namesnumes                    b/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/models/gpt2/modeling_gpt2.pyload_tf_weights_in_gpt2r^   C   s(   				   Q	
 	
 	
 	 goo233G
KKBBBCCC''00IEF  ' 'eBBB5BBCCC&&w55Temmoo&&&&5&)) / /eABBxzz# 	' 	'F||OV44 ' hhx88%h1~$$A#(=(=!'844Q3&&!'622Q5((KNe,C,C!';q>::!'844!';q>::;1$$+a.))!#,	}++ !j'-!j!jRWR]!j!j!jkkk , 	 	 	FFw}ek22FF	 	777888'..Ls     &10I66
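# Illustrative sketch (not part of the upstream module): the TF-weight loader above is normally reached
# indirectly through `from_pretrained(..., from_tf=True)`, but a manual conversion could look roughly like
# the commented snippet below. The file paths are assumptions, not values taken from this file.
#
#     from transformers import GPT2Config, GPT2LMHeadModel
#
#     config = GPT2Config.from_json_file("gpt2_config.json")            # assumed local config path
#     model = GPT2LMHeadModel(config)
#     model = load_tf_weights_in_gpt2(model, config, "gpt2_tf_ckpt")    # assumed TF checkpoint prefix
#     model.save_pretrained("./gpt2-converted")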

class GPT2Attention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        self.config = config
        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.is_causal = True

        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.full(
                [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
            )

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # Only the "normal" attention layer implements the causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # The mask value needs to be a tensor of the right dtype on the right device
            mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (no-op outside mixed precision)
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Use `torch.baddbmm` (slightly more efficient thanks to the alpha scaling parameter)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute scale factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5
        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder
        with torch.amp.autocast(query.device.type, enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # Only the "normal" attention layer implements the causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if layer_past is not None:
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
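# Shape sketch for the head split/merge helpers above (illustrative comment only; the sizes assume the
# 124M "gpt2" configuration, i.e. hidden_size=768, 12 heads, head_dim=64, and are not read from any checkpoint):
#
#     hidden_states: (batch, seq, 768)   -> _split_heads -> (batch, 12, seq, 64)
#     attn_output:   (batch, 12, seq, 64) -> _merge_heads -> (batch, seq, 768)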
eej                 dee
         dee
         deeej	        eej	                 f         df         fdZ xZS )GPT2FlashAttention2aD  
    GPT2 flash attention module. This module inherits from `GPT2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    c                 b     t                      j        |i | t                       | _        d S N)rf   rg   r!   _flash_attn_uses_top_left_maskr   rJ   kwargsr   s      r]   rg   zGPT2FlashAttention2.__init__a  s9    $)&)))
 3V2W2W.W+++r   NFr   r   r   r   r   r   r   r   r   .c	           
          |                                 \  }	}
}
|it          | d          st          d          |                     |          }|                     |                              | j        d          \  }}|}n3|                     |                              | j        d          \  }}}|                     || j        | j	                  }|                     || j        | j	                  }|                     || j        | j	                  }|@|d         }|d         }t          j        ||fd          }t          j        ||fd          }d }|du r||f}|j        d         }|j        d         }|                    dd                              |	|| j        | j	                  }|                    dd                              |	|| j        | j	                  }|                    dd                              |	|| j        | j	                  }| j        r| j        j        nd	}|j        t          j        k    rt          j                    rt          j                    }n3t          | j        d
          r| j        j        }n| j        j        j        }t4                              d| d           |                    |          }|                    |          }|                    |          }t;          ||||||| j        | j                  }|                     |	|| j        | j	        z            }|                     |          }| !                    |          }||f}|r||fz  }|S )Nr{   r   r3   r   r   r&   r   T        _pre_quantization_dtypezThe input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in .)dropoutr   use_top_left_mask)"r   r   rI   r{   rz   rC   rt   r   rr   rs   rK   r   rH   r   rm   trainingr   prc   r   is_autocast_enabledget_autocast_gpu_dtyperO   r   r|   r.   r7   warning_oncer   r(   r   r   r   r   )r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   tgt_lenr   target_dtyper   attn_weights_reshapedr   s                           r]   r   zGPT2FlashAttention2.forwardi  s    "&&((	Q ,4**  p  
 KK..E%:;;AA$/WXAYYJC3NN $M : : @ @VW @ X XE3!!%GGT^T]CC!!%GG!!!}H#AJ)XsO444CIz51r:::EElG{1~)A, 1%%**3dndm\\mmAq!!&&sGT^T]SS1%%**3WW.2mDt(** ;%-''(** 8$;==&?@@ 8#{B#{17$ $ $ $   HH\**E&&&&CHH\**E. n"A	
 	
 	
 !, 3 3Ct~X\XeGe f fkk"788((55( 	0-//Gr   r   r   r   r   __doc__rg   r   r   rK   r   r   rl   r   r   r   r   s   @r]   r   r   Y  s>        X X X X X 596:158<>B$),1^ ^e&7 89^ U5<01^ !!23	^
 E-.^  (5^ !)): ;^ D>^ $D>^ 
uU\5#667<	=^ ^ ^ ^ ^ ^ ^ ^r   r   c                   V    e Zd ZdZ fdZ	 	 	 	 	 	 	 ddeeej                          deeej	                          deej                 deej                 d	eej	                 d
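# Note (illustrative, not from the original file): the attention implementation is chosen through the
# `attn_implementation` argument of `from_pretrained`; a typical call enabling the class above might be:
#
#     model = GPT2LMHeadModel.from_pretrained(
#         "openai-community/gpt2",
#         attn_implementation="flash_attention_2",   # requires the flash-attn package and a supported GPU
#         torch_dtype=torch.float16,
#     )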
eej                 dee
         dee
         deeej	        eej	                 f         df         f fdZ xZS )GPT2SdpaAttentionz
    GPT2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `GPT2Attention` as the weights of the module stays untouched. The only changes are on the forward pass
    to adapt to the SDPA API.
    c                      t                      j        |i | t          j        t	                                t          j        d          k     | _        d S )Nz2.2.0)rf   rg   r   parser   require_contiguous_qkvr   s      r]   rg   zGPT2SdpaAttention.__init__  sP    $)&))) '.m4E4G4G&H&H7=Y`KaKa&a###r   NFr   r   r   r   r   r   r   r   r   .c	           
         |s|Ct                               d           t                                          ||||||||          S |                                \  }	}
}|d u}|rit          | d          st          d          |                     |          }|                     |          	                    | j
        d          \  }}|}n3|                     |          	                    | j
        d          \  }}}|                     || j        | j                  }|                     || j        | j                  }|                     || j        | j                  }|@|d         }|d         }t          j        ||fd	          }t          j        ||fd	          }d }|d
u r||f}| j        rN|j        j        dk    r>|<|                                }|                                }|                                }|
|
dk    r|sd
nd}t          j        j                            ||||| j        r| j        j        nd|          }|                    dd                                          }|                    |	|
| j                  }|                     |          }|                     |          }||d fS )Na  `GPT2SdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.)r   r   r   r   r   r   r   r   r{   zIf class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPT2SdpaAttention(..., is_cross_attention=True)`.r3   r   r   r&   r   TcudaFr   )	attn_mask	dropout_pr   )r7   r   rf   r   r   r   rI   r{   rz   rC   rt   r   rr   rs   rK   r   r   r   r   r   r   r   scaled_dot_product_attentionr   r   r   r   rm   rp   r|   r   )r   r   r   r   r   r   r   r   r   r   q_lenr   rv   r   r   r   r   r   r   r   r   r   s                        r]   r   zGPT2SdpaAttention.forward  s     		 5w   77??+%-#&;'=#"3 # 	 	 	 &**,,UA 3$> 	Y4**  t  
 KK..E%:;;AA$/WXAYYJC3NN $M : : @ @VW @ X XE3!!%GGT^T]CC!!%GG !!!}H#AJ)XsO444CIz51r:::EElG & 	'5<+<+F+F>Ke$$&&E..""C$$&&E +2uqyyI[yDDaf	h)FF$-1]Cd')) G 
 
 "++Aq11<<>>!&&sE4>BB kk+..((55GT))r   r   r   r   s   @r]   r   r     sJ        b b b b b 596:158<>B$),1X* X*e&7 89X* U5<01X* !!23	X*
 E-.X*  (5X* !)): ;X* D>X* $D>X* 
uU\5#667<	=X* X* X* X* X* X* X* X* X* X*r   r   c                   Z     e Zd Z fdZdeeej                          dej        fdZ xZ	S )GPT2MLPc                    t                                                       |j        }t          ||          | _        t          ||          | _        t          |j                 | _        t          j
        |j                  | _        d S r   )rf   rg   ro   r   c_fcr|   r   activation_functionactr   r}   r   r   )r   intermediate_sizerO   rp   r   s       r]   rg   zGPT2MLP.__init__6  sl    &	,i88	Y(9::&45z&"455r   r   r   c                     |                      |          }|                     |          }|                     |          }|                     |          }|S r   )r
  r  r|   r   )r   r   s     r]   r   zGPT2MLP.forward>  sL    		-00//M22]33r   )
r   r   r   rg   r   r   rK   r   r   r   r   s   @r]   r  r  5  sj        6 6 6 6 6XeE4E.F%G EL]        r   r  )eagerflash_attention_2sdpac                   v    e Zd Zd fd	Z	 	 	 	 	 	 	 ddeeej                          deeej                          deej                 deej                 deej                 d	eej                 d
ee	         dee	         de
eej                 eeej        eej        df         f                  f         fdZ xZS )	GPT2BlockNc                    t                                                       |j        }|j        |j        nd|z  }t          |j                 }t          j        ||j                  | _	         |||          | _
        t          j        ||j                  | _        |j        r3 ||d|          | _        t          j        ||j                  | _        t          ||          | _        d S )N   eps)rO   rx   T)rO   rv   rx   )rf   rg   ro   n_innerGPT2_ATTENTION_CLASSES_attn_implementationr   	LayerNormlayer_norm_epsilonln_1attnln_2add_cross_attentioncrossattentionln_cross_attnr  mlp)r   rO   rx   ro   	inner_dimattention_classr   s         r]   rg   zGPT2Block.__init__J  s    (&,n&@FNNa+o	01LML&2KLLL	#O6YGGG	L&2KLLL	% 	Z"1/TXdm"n"n"nD!#kv?X!Y!Y!YD9f--r   Fr   r   r   r   r   r   r   r   r   .c	                 
   |}	|                      |          }|                     ||||||          }
|
d         }|
dd          }||	z   }|ot          | d          st          d|  d          |}	|                     |          }|                     ||||||          }|d         }|	|z   }||dd          z   }|}	|                     |          }|                     |          }|	|z   }|r|f|z   }n|f|dd          z   }|S )	N)r   r   r   r   r   r   r&   r!  z'If `encoder_hidden_states` are passed, z` has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`)r   r   r   r   r   r3   )r  r  r   rI   r"  r!  r  r#  )r   r   r   r   r   r   r   r   r   residualattn_outputsr   r   cross_attn_outputsfeed_forward_hidden_statess                  r]   r   zGPT2Block.forwardZ  s    !		-00yy!)/ ! 
 
 #1oqrr"#h. ,4!122  Zd Z Z Z   %H ..}==M!%!4!4-#&;'="3 "5 " " -Q/K${2M 2122 66G 		-00%)XXm%<%<" #== 	5$&0GG$&4Gr   r   r   )r   r   r   rg   r   r   rK   r   r   rl   r   r   r   r   s   @r]   r  r  I  s5       . . . . . .& 596:158<>B$),1; ;e&7 89; U5<01; !!23	;
 E-.;  (5; !)): ;; D>; $D>; 
uU\"HU5<uGXZ]G]A^3^-_$``	a; ; ; ; ; ; ; ;r   r  c                   N     e Zd ZdZeZeZdZdZ	dZ
dgZdZdZdZ fdZd Z xZS )GPT2PreTrainedModelz

class GPT2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPT2Config
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPT2Block"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

        # Reinitialize selected weights following the OpenAI GPT-2 paper scheme: scale the weights of residual
        # projections at initialization by a factor of 1/sqrt(N), where N is the number of residual layers.
        for name, p in module.named_parameters():
            if name == "c_proj.weight":
                p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))


@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
            Multiple choice classification loss.
        logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)`).

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    mc_loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mc_logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`GPT2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
a  
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`. In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
a  
    This is an experimental feature and is a subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (`Dict[int, list]`, *optional*):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
            following number of attention modules:

                - openai-community/gpt2: 12
                - openai-community/gpt2-medium: 24
                - openai-community/gpt2-large: 36
                - openai-community/gpt2-xl: 48

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
    model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
        1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
        3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
    }
    model.parallelize(device_map)
    ```
"""

DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to cpu from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with openai-community/gpt2-large:
    model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6, 7],
        1: [8, 9, 10, 11, 12, 13, 14, 15],
        2: [16, 17, 18, 19, 20, 21, 22, 23],
        3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""


@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    _supports_param_buffer_assignment = False

    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self._attn_implementation = config._attn_implementation

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance"
            " {'h.0': 0, 'h.1': 1, ...}",
            FutureWarning,
        )
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        self.wpe = self.wpe.to(self.first_device)
        # Load blocks onto their devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to the last device
        self.ln_f = self.ln_f.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        self.wpe = self.wpe.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        # Attention mask.
        _use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False and head_mask is None
        attention_mask = attention_mask.view(batch_size, -1) if attention_mask is not None else None
        if self._attn_implementation == "flash_attention_2":
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif _use_sdpa:
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask=attention_mask,
                input_shape=(batch_size, input_shape[-1]),
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_length,
            )
        else:
            if attention_mask is not None:
                # Create a broadcastable [batch_size, 1, 1, to_seq_length] mask from the 2D tensor mask.
                attention_mask = attention_mask[:, None, None, :]

                # The mask is 1.0 for positions to attend and 0.0 for masked positions; convert it to an additive
                # mask that is 0.0 for attended positions and the dtype's smallest value for masked positions.
                attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            if _use_sdpa:
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1]
                )
            elif not self._attn_implementation == "flash_attention_2":
                encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel: move the working tensors to the block's device
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

            # Model parallel: if this is the last layer on the device, move activations to the next one
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
    embeddings).
    c            #           e Zd ZdgZ fdZ ee          dd            Z ee          d             Z	d Z
d Z ee           eeee          	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd	eej                 d
eeeej                                   deej                 deej                 deej                 deej                 deej                 deej                 deej                 deej                 dee         dee         dee         dee         deeef         fd                        Zed
eeej                          dej        deeej                          fd            Z xZS )GPT2LMHeadModellm_head.weightc                    t                                          |           t          |          | _        t	          j        |j        |j        d          | _        d| _	        d | _
        |                                  d S NFr0   )rf   rg   rU  r-  r   r5  n_embdr[  lm_headrc  rd  rf  rg  s     r]   rg   zGPT2LMHeadModel.__init__  sq       $V,,y0ANNN $ 	r   Nc                    t          j        dt                     |Pt          t	          | j        j                  t          t          j	        
                                                    n|| _        t          | j        t	          | j        j                             | j                            | j                   | j                            | j        j                  | _        d| _        d S )NaT  `GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}T)rk  rl  rm  r%   rF   r-  ra  r_  rK   r  rn  rd  r$   rx  r  r   rq  rc  r   rd  s     r]   rx  zGPT2LMHeadModel.parallelize  s    - 	
 	
 	
 ! 3t/122E%*:Q:Q:S:S4T4TUUU 	
 	$/3t/?/A+B+BCCC$$T_555|t'7'DEE"r   c                 2   t          j        dt                     | j                                         | j                            d          | _        | j                            d          | _        d| _        t          j	        
                                 d S Nrz  ri  F)rk  rl  rm  r-  r|  r   r  rc  rK   r  r{  r  s    r]   r|  zGPT2LMHeadModel.deparallelize  s    j	
 	
 	
 	&&(((+..u55|u--#
     r   c                     | j         S r   r  r  s    r]   get_output_embeddingsz%GPT2LMHeadModel.get_output_embeddings  
    |r   c                     || _         d S r   r  r  s     r]   set_output_embeddingsz%GPT2LMHeadModel.set_output_embeddings      %r   r  r  r.  r   r  r  r   r  r   r   labelsr   r   r  r  r   c                 0   ||n| j         j        }|                     |||||||||	||||          }|d         }| j        rMt          j                            | j        j                   |                    | j	        j
        j                  }| 	                    |          }d}|
|
                    |j                  }
|dddddf                                         }|
dddf                                         }t                      } ||                    d|                    d                    |                    d                    }|s|f|dd         z   }||f|z   n|S t!          |||j        |j        |j        |j                  S )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        N)r.  r   r  r  r   r  r   r   r   r   r  r  r   .r   r&   )rM  rO  r.  r   rQ  r  )rO   r  r-  rc  rK   r  r  rq  r   r  r.   r   r   r
   rm   r   r   r.  r   rQ  r  )r   r  r.  r   r  r  r   r  r   r   r  r   r   r  r  transformer_outputsr   	lm_logitsrM  shift_logitsshift_labelsloss_fctoutputs                          r]   r   zGPT2LMHeadModel.forward  s   : &1%<kk$+B]"..+))%'"7#9/!5# / 
 
 ,A.  	IJ!!$"2"?@@@),,T\-@-GHHMLL//	YYy/00F$S#2#qqq[1<<>>L!#qrr'?5577L'))H8L--b,2C2CB2G2GHH,J[J[\^J_J_``D 	F\$7$;;F)-)9TGf$$vE0/?-;*50A
 
 
 	
r   beam_idxc                 :    t          fd| D                       S )  
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        c              3   N   K   | ]}t          fd |D                       V   dS )c              3   t   K   | ]2}|                     d                     |j                            V  3dS r   Nindex_selectr   r   rX  r  r  s     r]   r  z;GPT2LMHeadModel._reorder_cache.<locals>.<genexpr>.<genexpr>1  C      jjQ[*))!X[[9J-K-KLLjjjjjjr   Nr  rX  r   r  s     r]   r  z1GPT2LMHeadModel._reorder_cache.<locals>.<genexpr>0  U       
 
 jjjj_ijjjjj
 
 
 
 
 
r   r  r.  r  s    `r]   _reorder_cachezGPT2LMHeadModel._reorder_cache'  =      
 
 
 
-
 
 
 
 
 	
r   r   NNNNNNNNNNNNNN)r   r   r   _tied_weights_keysrg   r   r  rx  r  r|  r  r  r   r  r   r  r   r  r   rK   r  r   r   r   rl   r   r   staticmethodr  r   r   s   @r]   r  r    s        ++
 
 
 
 
 /00# # # 10#$ 122	! 	! 32	!  & & & +*+@AA&5$   15@D6:593715598<>B-1$(,0/3&*G
 G
E,-G
 "%el(;"<=G
 !!23	G

 !!12G
 u/0G
 E-.G
   12G
  (5G
 !)): ;G
 )*G
 D>G
 $D>G
 'tnG
 d^G
  
u77	8!G
 G
 G
  BAG
R 
uU\23
?D|
	uU\"	#
 
 
 \
 
 
 
 
r   r  ag  
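
# Illustrative usage sketch, not part of the upstream source: it shows that `labels` in
# `GPT2LMHeadModel.forward` are shifted internally, so passing `labels=input_ids` is enough to get the
# standard next-token language-modeling loss. The checkpoint name matches the doc samples in this file;
# the prompt string and the helper's name are assumptions made only for this demo.
def _example_lm_loss_and_generation():  # hypothetical helper, not defined upstream
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")

    inputs = tokenizer("GPT-2 predicts the next token", return_tensors="pt")
    # The model shifts logits/labels by one position, so the loss covers positions 1..n-1.
    outputs = model(**inputs, labels=inputs["input_ids"])
    print(float(outputs.loss))

    # Greedy continuation via the inherited GenerationMixin API.
    generated = model.generate(inputs["input_ids"], max_new_tokens=20)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))
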
@add_start_docstrings(
    """
The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings, the classification head takes as input the hidden state of a specified classification token index
in the input sequence.
""",
    GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should"
            " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide"
            " your own `device_map` but it needs to be a dictionary module_name to device, so for instance"
            " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
        self.model_parallel = True

    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.multiple_choice_head = self.multiple_choice_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        mc_token_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        mc_labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]:
        r"""
        mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
            Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
            1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
            `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`.
        mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)

        Return:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")

        >>> # Add a [CLS] to the vocabulary (we should train it also!)
        >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
        >>> # Update the model embeddings with the new vocabulary size
        >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))

        >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        >>> encoded_choices = [tokenizer.encode(s) for s in choices]
        >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        >>> mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
        >>> lm_logits = outputs.logits
        >>> mc_logits = outputs.mc_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        mc_loss = None
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        lm_loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits, mc_logits) + transformer_outputs[1:]
            if mc_loss is not None:
                output = (mc_loss,) + output
            return ((lm_loss,) + output) if lm_loss is not None else output

        return GPT2DoubleHeadsModelOutput(
            loss=lm_loss,
            mc_loss=mc_loss,
            logits=lm_logits,
            mc_logits=mc_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
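
# Brief follow-up sketch, not part of the upstream source: once `mc_token_ids` points at the
# classification position of each choice (as in the docstring example above), passing `mc_labels` with
# the index of the correct choice yields `outputs.mc_loss` from the multiple-choice head. The token ids
# below are arbitrary dummy values and the helper name is an assumption for this demo only.
def _example_double_heads_mc_loss():  # hypothetical helper, not defined upstream
    import torch

    model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")
    choice = [464, 3290, 318, 13779]  # dummy GPT-2 token ids for one choice
    input_ids = torch.tensor([[choice, choice]])  # shape: (batch_size=1, num_choices=2, seq_len=4)
    mc_token_ids = torch.tensor([[3, 3]])  # classification token = last position of each choice
    mc_labels = torch.tensor([0])  # pretend the first choice is the correct one

    outputs = model(input_ids, mc_token_ids=mc_token_ids, mc_labels=mc_labels)
    print(float(outputs.mc_loss), tuple(outputs.mc_logits.shape))  # mc_logits: (batch_size, num_choices)
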
@add_start_docstrings(
    """
    The GPT2 Model transformer with a sequence classification head on top (linear layer).

    [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint="microsoft/DialogRPT-updown",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token is found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds`."
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
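
# Illustrative sketch, not part of the upstream source: the sequence-classification head scores only the
# last non-padding token of each row, so `config.pad_token_id` must be set when batches are padded.
# The base checkpoint, label count, and example sentences are assumptions made for this demo; the
# classification head is freshly initialized when loading plain GPT-2 weights.
def _example_sequence_classification():  # hypothetical helper, not defined upstream
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
    model = GPT2ForSequenceClassification.from_pretrained(
        "openai-community/gpt2", num_labels=2, pad_token_id=tokenizer.pad_token_id
    )
    batch = tokenizer(["short text", "a slightly longer example text"], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**batch).logits  # shape: (batch_size, num_labels), one score per sequence
    print(tuple(logits.shape))
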
@add_start_docstrings(
    """
    GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForTokenClassification(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = GPT2Model(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint="brad1141/gpt2-finetuned-comp2",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_loss=0.25,
        expected_output=[
            "Lead", "Lead", "Lead", "Position", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead",
        ],
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
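
# Illustrative sketch, not part of the upstream source: the token-classification head emits one logit
# vector per input position, which is then mapped to per-token labels for NER-style tasks. The checkpoint
# follows the doc-sample checkpoint referenced above; the input sentence and helper name are assumptions
# made only for this demo.
def _example_token_classification():  # hypothetical helper, not defined upstream
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("brad1141/gpt2-finetuned-comp2")
    model = GPT2ForTokenClassification.from_pretrained("brad1141/gpt2-finetuned-comp2")
    inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels)
    predicted = logits.argmax(-1)[0]
    print([model.config.id2label[int(i)] for i in predicted])
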
@add_start_docstrings(
    """
    The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )