"""OpenAI GPT-2 configuration"""

from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)


class GPT2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
    instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GPT-2
    [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (`str`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Has to be one of the following options:

                - `"last"`: Take the last token hidden state (like XLNet).
                - `"first"`: Take the first token hidden state (like BERT).
                - `"mean"`: Take the mean of all tokens hidden states.
                - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used for the multiple choice head in
            [`GPT2DoubleHeadsModel`].

            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            The dropout ratio to be used after the projection and activation.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Whether to scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end of sentence token in the vocabulary.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast the attention
            dot-product/softmax to float32 when training with mixed precision.

    Example:

    ```python
    >>> from transformers import GPT2Config, GPT2Model

    >>> # Initializing a GPT2 configuration
    >>> configuration = GPT2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPT2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
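
    >>> # Defaults can be overridden per argument; a deliberately small
    >>> # configuration (illustrative sizes, not an official checkpoint)
    >>> small_configuration = GPT2Config(n_embd=256, n_layer=4, n_head=4)
    >>> small_model = GPT2Model(small_configuration)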
    ```"""

    model_type = "gpt2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
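
# A quick sketch of what `attribute_map` above buys us (illustrative, not
# executed on import): `PretrainedConfig` uses this mapping to expose the
# library-wide attribute names as aliases for GPT-2's historical names, e.g.:
#
#     config = GPT2Config(n_embd=256, n_layer=4)
#     config.hidden_size        # 256, resolved through the alias for `n_embd`
#     config.num_hidden_layers  # 4, resolved through the alias for `n_layer`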


class GPT2OnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # GPT-2 has no pad token of its own; default to 0 so dummy inputs can be padded.
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the attention mask so it also covers the dummy past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
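

# A minimal usage sketch for the ONNX config (illustrative, not executed on
# import); it assumes the optional ONNX export dependencies, PyTorch, and a
# GPT-2 tokenizer are available:
#
#     from transformers import GPT2Config, GPT2Tokenizer, TensorType
#
#     config = GPT2Config()
#     onnx_config = GPT2OnnxConfig(config, use_past=True)
#     print(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask
#
#     tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)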