"""Fuyu model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class FuyuConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate a
    Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 262144):
            Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`FuyuForCausalLM`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 16384):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 16384):
            The maximum sequence length that this model might ever be used with.
        image_size (`int`, *optional*, defaults to 300):
            The input image size.
        patch_size (`int`, *optional*, defaults to 30):
            The input vision transformer encoding patch size.
        num_channels (`int`, *optional*, defaults to 3):
            The input image number of channels.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        rope_theta (`float`, *optional*, defaults to 25000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions; see the usage example below.
        qk_layernorm (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the Queries and Keys after projecting the hidden states.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after applying the MLP to the hidden states.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
            Percentage of the query and keys which will have rotary embedding.

        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize the text model configuration. Defaults to a
            `persimmon` configuration if no `model_type` key is provided.

    ```python
    >>> from transformers import FuyuConfig

    >>> # Initializing a Fuyu fuyu-8b style configuration
    >>> configuration = FuyuConfig()
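
    >>> # Hypothetical overrides, for illustration only: a 2-layer text model and linear
    >>> # RoPE scaling in the documented `{"type": ..., "factor": ...}` format.
    >>> small_configuration = FuyuConfig(num_hidden_layers=2, rope_scaling={"type": "linear", "factor": 2.0})

    >>> # The nested text model configuration (built with `persimmon` defaults when no
    >>> # `text_config` is passed) is exposed as `text_config`.
    >>> configuration.text_config.model_type
    'persimmon'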
    ```"""

    model_type = "fuyu"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=262144,
        hidden_size=4096,
        intermediate_size=16384,
        num_hidden_layers=36,
        num_attention_heads=64,
        hidden_act="relu2",
        max_position_embeddings=16384,
        image_size=300,
        patch_size=30,
        num_channels=3,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=25000.0,
        rope_scaling=None,
        qk_layernorm=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        partial_rotary_factor=0.5,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        text_config=None,
        **kwargs,
    ):
        if text_config is None:
            # Mirror the top-level arguments into the text model configuration so the
            # wrapped language model stays in sync with this config.
            text_config = {
                "vocab_size": vocab_size,
                "max_position_embeddings": max_position_embeddings,
                "hidden_size": hidden_size,
                "intermediate_size": intermediate_size,
                "num_hidden_layers": num_hidden_layers,
                "num_attention_heads": num_attention_heads,
                "hidden_act": hidden_act,
                "initializer_range": initializer_range,
                "layer_norm_eps": layer_norm_eps,
                "use_cache": use_cache,
                "rope_theta": rope_theta,
                "rope_scaling": rope_scaling,
                "qk_layernorm": qk_layernorm,
                "hidden_dropout": hidden_dropout,
                "attention_dropout": attention_dropout,
                "partial_rotary_factor": partial_rotary_factor,
                "pad_token_id": pad_token_id,
                "bos_token_id": bos_token_id,
                "eos_token_id": eos_token_id,
                "tie_word_embeddings": tie_word_embeddings,
            }
            logger.info("text_config is None. initializing the text model with default values.")

        text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.qk_layernorm = qk_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.partial_rotary_factor = partial_rotary_factor
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
z<
        Validate the `rope_scaling` configuration.
        Nr   zN`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got typefactor)lineardynamiczF`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got g      ?z7`rope_scaling`'s factor field must be a float > 1, got )r%   
isinstancedictlen
ValueErrorgetfloat)r;   rope_scaling_typerope_scaling_factors      r?   r8   z#FuyuConfig._rope_scaling_validation   s    $F$+T22 	c$:K6L6LPQ6Q6Qwdhduww   !-11&$??"/33HdCC$(9AV(V(VlYjll   &j9Le.T.T&XkorXrXrlWjllmmm YsXrr@   )r   r   r   r   r   r   r   r   r   r   r   r   TFr   NTr   r   r   Nr   r   N)	__name__