
"""RAG model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
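
    Example (illustrative sketch; a DPR question encoder paired with a BART generator is one common choice, and
    the keyword overrides below are arbitrary):

    ```python
    >>> from transformers import BartConfig, DPRConfig, RagConfig

    >>> # Compose a RAG configuration from a question encoder config and a generator config,
    >>> # overriding a couple of retrieval-related defaults.
    >>> question_encoder_config = DPRConfig()
    >>> generator_config = BartConfig()
    >>> config = RagConfig.from_question_encoder_generator_configs(
    ...     question_encoder_config, generator_config, n_docs=10, index_name="exact"
    ... )
    ```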
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        dataset_revision=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        if "question_encoder" not in kwargs or "generator" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because both `question_encoder` and"
                f" `generator` sub-configurations were not passed, only {kwargs}"
            )

        # Pop the sub-configurations and rebuild them through AutoConfig so that any
        # question encoder / generator architecture pair can be composed.
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.dataset_revision = dataset_revision

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # Fall back to the generator's forced EOS token if none was set on the composite config.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`RagConfig`] (or a derived class) from a pre-trained question encoder model configuration and a
        generator model configuration.

        Returns:
            [`RagConfig`]: An instance of a configuration object.
        """
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)