from __future__ import annotations

from typing import Any

from torch.nn import Module

from sentence_transformers import SentenceTransformer

from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .MatryoshkaLoss import MatryoshkaLoss


class Matryoshka2dLoss(AdaptiveLayerLoss):
    def __init__(
        self,
        model: SentenceTransformer,
        loss: Module,
        matryoshka_dims: list[int],
        matryoshka_weights: list[float | int] | None = None,
        n_layers_per_step: int = 1,
        n_dims_per_step: int = 1,
        last_layer_weight: float = 1.0,
        prior_layers_weight: float = 1.0,
        kl_div_weight: float = 1.0,
        kl_temperature: float = 0.3,
    ) -> None:
        """
        The Matryoshka2dLoss can be seen as a loss *modifier* that combines the :class:`AdaptiveLayerLoss` and the
        :class:`MatryoshkaLoss`. This allows you to train an embedding model whose users can 1) specify the number
        of model layers to use, and 2) specify the number of output dimensions to use.

        The former is useful when you want to give users the option to lower the number of layers used to improve
        their inference speed and memory usage, and the latter is useful when you want to give users the option to
        lower the output dimensionality to improve the efficiency of their downstream tasks (e.g. retrieval) or to
        lower their storage costs.

        Note that this loss uses `n_layers_per_step=1` and `n_dims_per_step=1` as defaults, following the original
        2DMSE implementation.

        Args:
            model: SentenceTransformer model
            loss: The loss function to be used, e.g.
                :class:`MultipleNegativesRankingLoss`,
                :class:`CoSENTLoss`, etc.
            matryoshka_dims: A list of embedding dimensions to be used
                for the loss function, e.g. [768, 512, 256, 128, 64].
            matryoshka_weights: A list of weights to be used for the
                loss function, e.g. [1, 1, 1, 1, 1]. If None, then the
                weights will be set to 1 for all dimensions.
            n_layers_per_step: The number of layers to use per step. If
                -1, then all layers are used. If > 0, then a random
                sample of n_layers_per_step layers is used per step.
                The 2DMSE paper uses `n_layers_per_step=1`, which is
                also the default here.
            n_dims_per_step: The number of dimensions to use per step.
                If -1, then all dimensions are used. If > 0, then a
                random sample of n_dims_per_step dimensions is used per
                step. The default value is 1.
            last_layer_weight: The weight to use for the loss of the
                final layer. Increase this to focus more on the
                performance when using all layers. The default value is
                1.0.
            prior_layers_weight: The weight to use for the loss of the
                prior layers. Increase this to focus more on the
                performance when using fewer layers. The default value
                is 1.0.
            kl_div_weight: The weight to use for the KL-divergence loss
                that is used to make the prior layers match that of the
                last layer. Increase this to focus more on the
                performance when using fewer layers. The default value
                is 1.0.
            kl_temperature: The temperature to use for the KL-divergence
                loss. If 0, then the KL-divergence loss is not used. The
                default value is 0.3.

        References:
            - See the 2D Matryoshka Sentence Embeddings (2DMSE) paper: https://arxiv.org/abs/2402.14776
            - `Matryoshka Embeddings <../../examples/training/matryoshka/README.html>`_
            - `Adaptive Layers <../../examples/training/adaptive_layer/README.html>`_

        Requirements:
            1. The base loss cannot be :class:`CachedMultipleNegativesRankingLoss`.

        Inputs:
            +---------------------------------------+--------+
            | Texts                                 | Labels |
            +=======================================+========+
            | any                                   | any    |
            +---------------------------------------+--------+

        Relations:
            - :class:`MatryoshkaLoss` is used in this loss, and it is responsible for the dimensionality reduction.
            - :class:`AdaptiveLayerLoss` is used in this loss, and it is responsible for the layer reduction.

        Example:
            ::

                from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
                from datasets import Dataset

                model = SentenceTransformer("microsoft/mpnet-base")
                train_dataset = Dataset.from_dict({
                    "anchor": ["It's nice weather outside today.", "He drove to work."],
                    "positive": ["It's so sunny.", "He took the car to the office."],
                })
                loss = losses.MultipleNegativesRankingLoss(model)
                loss = losses.Matryoshka2dLoss(model, loss, [768, 512, 256, 128, 64])

                trainer = SentenceTransformerTrainer(
                    model=model,
                    train_dataset=train_dataset,
                    loss=loss,
                )
                trainer.train()
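
            After training, users can take advantage of both axes at inference time. Below is a minimal
            sketch of that usage: the model path, the dimension 256, the layer count 6, and the BERT-style
            `model[0].auto_model.encoder.layer` attribute path are illustrative assumptions; pick a
            dimension from your trained `matryoshka_dims`, and note that the layer attribute path
            varies per architecture::

                # Load the trained model, keeping only the first 256 output dimensions
                # ("path/to/trained-model" is a placeholder for your saved model)
                model = SentenceTransformer("path/to/trained-model", truncate_dim=256)

                # Optionally also drop the upper transformer layers, e.g. keep only the first 6
                # (attribute path assumes a BERT-style architecture)
                model[0].auto_model.encoder.layer = model[0].auto_model.encoder.layer[:6]

                embeddings = model.encode(["It's nice weather outside today."])
                print(embeddings.shape)  # (1, 256)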
        """
        # MatryoshkaLoss (the wrapped loss) is responsible for the dimensionality reduction
        matryoshka_loss = MatryoshkaLoss(
            model,
            loss,
            matryoshka_dims,
            matryoshka_weights=matryoshka_weights,
            n_dims_per_step=n_dims_per_step,
        )
        # AdaptiveLayerLoss (the superclass) is responsible for the layer reduction
        super().__init__(
            model,
            matryoshka_loss,
            n_layers_per_step=n_layers_per_step,
            last_layer_weight=last_layer_weight,
            prior_layers_weight=prior_layers_weight,
            kl_div_weight=kl_div_weight,
            kl_temperature=kl_temperature,
        )

    def get_config_dict(self) -> dict[str, Any]:
        # Merge the AdaptiveLayerLoss config with that of the wrapped MatryoshkaLoss
        return {
            **super().get_config_dict(),
            **self.loss.get_config_dict(),
        }

    @property
    def citation(self) -> str:
        return """
@misc{li20242d,
    title={2D Matryoshka Sentence Embeddings},
    author={Xianming Li and Zongxi Li and Jing Li and Haoran Xie and Qing Li},
    year={2024},
    eprint={2402.14776},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""