
from __future__ import annotations

import random
import warnings
from collections.abc import Iterable
from typing import Any

import torch
from torch import Tensor, nn
from torch.nn import functional as F

from sentence_transformers import SentenceTransformer
from sentence_transformers.losses.CachedGISTEmbedLoss import CachedGISTEmbedLoss
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from sentence_transformers.models import Transformer


class TransformerDecorator:
    """
    Decorator that caches the embeddings of all layers of the transformer.
    When `layer_idx` is set, it returns the cached embeddings of that layer instead.

    This is meant to override the forward function of the Transformer.
    """

    def __init__(self, transformer: Transformer, original_forward) -> None:
        self.transformer = transformer
        self.original_forward = original_forward
        self.embeddings = []
        self.last_embeddings = []
        self.features = []
        self.layer_idx = None
        self.call_idx = 0

    def set_layer_idx(self, layer_idx: int) -> None:
        self.layer_idx = layer_idx
        self.call_idx = 0

    def get_layer_embeddings(self) -> Tensor:
        return torch.concat([embedding[self.layer_idx] for embedding in self.embeddings], dim=1)

    def __call__(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
        if self.layer_idx is None:
            # First pass: run the full model and cache the embeddings of all layers
            output = self.call_grow_cache(features)
        else:
            # Subsequent passes: serve the requested layer from the cache
            output = self.call_use_cache(features)
            self.call_idx += 1
        return output

    def call_grow_cache(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
        """
        Temporarily sets the output_hidden_states to True, runs the model, and then restores the original setting.
        Use the all_layer_embeddings to get the embeddings of all layers.
        """
        original_output_hidden_states = self.transformer.auto_model.config.output_hidden_states
        self.transformer.auto_model.config.output_hidden_states = True

        output = self.original_forward(features)
        # The number of layers, not counting the initial embedding layer
        self.num_layers = len(output["all_layer_embeddings"]) - 1
        # Cache the intermediate layers, the final token embeddings, and the remaining
        # features (e.g. the attention mask) so later passes can be served from the cache
        self.embeddings.append(output["all_layer_embeddings"][1:-1])
        self.last_embeddings.append(output["token_embeddings"])
        self.features.append(
            {key: value for key, value in output.items() if key not in ("all_layer_embeddings", "token_embeddings")}
        )

        self.transformer.auto_model.config.output_hidden_states = original_output_hidden_states

        if not original_output_hidden_states:
            del output["all_layer_embeddings"]

        return output

    def call_use_cache(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
        return {**self.features[self.call_idx], "token_embeddings": self.embeddings[self.call_idx][self.layer_idx]}


class ForwardDecorator:
    """
    Decorator that caches the embeddings after all modules (e.g. pooling) of the model.
    Required to get the embeddings after all modules for the KL-divergence loss.

    This is meant to override the forward function of the SentenceTransformer.
    """

    def __init__(self, fn) -> None:
        self.fn = fn
        self.embeddings = []

    def __call__(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
        output = self.fn(features)
        self.embeddings.append(output["sentence_embedding"])
        return output

    def get_embeddings(self) -> Tensor:
        embeddings = torch.concat(self.embeddings, dim=0)
        self.embeddings = []
        return embeddings


class AdaptiveLayerLoss(nn.Module):
    def __init__(
        self,
        model: SentenceTransformer,
        loss: nn.Module,
        n_layers_per_step: int = 1,
        last_layer_weight: float = 1.0,
        prior_layers_weight: float = 1.0,
        kl_div_weight: float = 1.0,
        kl_temperature: float = 0.3,
    ) -> None:
        """
        The AdaptiveLayerLoss can be seen as a loss *modifier* that allows you to use other loss functions at non-final
        layers of the Sentence Transformer model. This is useful for when you want to train a model where users have
        the option to lower the number of layers used to improve their inference speed and memory usage.

        Args:
            model: SentenceTransformer model
            loss: The loss function to be used, e.g.
                :class:`MultipleNegativesRankingLoss`,
                :class:`CoSENTLoss`, etc.
            n_layers_per_step: The number of layers to use per step. If
                -1, then all layers are used. If > 0, then a random
                sample of `n_layers_per_step` layers are used per step,
                separate from the final layer, which is always used. The
                2DMSE paper uses `n_layers_per_step=1`. The default
                value is 1.
            last_layer_weight: The weight to use for the loss of the
                final layer. Increase this to focus more on the
                performance when using all layers. The default value is
                1.0.
            prior_layers_weight: The weight to use for the loss of the
                prior layers. Increase this to focus more on the
                performance when using fewer layers. The default value
                is 1.0.
            kl_div_weight: The weight to use for the KL-divergence loss
                that is used to make the prior layers match that of the
                last layer. Increase this to focus more on the
                performance when using fewer layers. The default value
                is 1.0.
            kl_temperature: The temperature to use for the KL-divergence
                loss. If 0, then the KL-divergence loss is not used. The
                default value is 0.3.

        References:
            - The concept was inspired by the 2DMSE paper: https://arxiv.org/abs/2402.14776
            - `Adaptive Layers <../../examples/training/adaptive_layer/README.html>`_

        Requirements:
            1. The base loss cannot be :class:`CachedMultipleNegativesRankingLoss` or :class:`CachedGISTEmbedLoss`.

        Inputs:
            +---------------------------------------+--------+
            | Texts                                 | Labels |
            +=======================================+========+
            | any                                   | any    |
            +---------------------------------------+--------+

        Relations:
            - :class:`Matryoshka2dLoss` uses this loss in combination with :class:`MatryoshkaLoss` which allows for
                output dimensionality reduction for faster downstream tasks (e.g. retrieval).

        Example:
            ::

                from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
                from datasets import Dataset

                model = SentenceTransformer("microsoft/mpnet-base")
                train_dataset = Dataset.from_dict({
                    "anchor": ["It's nice weather outside today.", "He drove to work."],
                    "positive": ["It's so sunny.", "He took the car to the office."],
                })
                loss = losses.MultipleNegativesRankingLoss(model=model)
                loss = losses.AdaptiveLayerLoss(model, loss)

                trainer = SentenceTransformerTrainer(
                    model=model,
                    train_dataset=train_dataset,
                    loss=loss,
                )
                trainer.train()
        """
        super().__init__()
        self.model = model
        self.loss = loss
        self.n_layers_per_step = n_layers_per_step
        self.last_layer_weight = last_layer_weight
        self.prior_layers_weight = prior_layers_weight
        self.kl_div_weight = kl_div_weight
        self.kl_temperature = kl_temperature
        assert isinstance(self.model[0], Transformer)
        if isinstance(loss, CachedMultipleNegativesRankingLoss):
            warnings.warn("AdaptiveLayerLoss is not compatible with CachedMultipleNegativesRankingLoss.", stacklevel=2)
        if isinstance(loss, CachedGISTEmbedLoss):
            warnings.warn("AdaptiveLayerLoss is not compatible with CachedGISTEmbedLoss.", stacklevel=2)

    def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
        # Decorate the Transformer module so it caches the embeddings of every layer,
        # and decorate the full model so the pooled sentence embeddings are cached as well
        original_transformer_forward = self.model[0].forward
        transformer_decorator = TransformerDecorator(self.model[0], original_transformer_forward)
        self.model[0].forward = transformer_decorator

        original_forward = self.model.forward
        forward_decorator = ForwardDecorator(original_forward)
        self.model.forward = forward_decorator

        # Loss over the final layer; this pass also fills the caches in both decorators
        loss = self.loss(sentence_features, labels) * self.last_layer_weight

        if self.kl_temperature > 0:
            final_embeddings = forward_decorator.get_embeddings()
            final_embeddings = F.softmax(final_embeddings / self.kl_temperature, dim=-1)

        # Sample the prior layers to train on this step; the final layer is always included above
        num_layers = transformer_decorator.num_layers
        layer_indices = range(num_layers - 1)
        if self.n_layers_per_step > 0 and self.n_layers_per_step < num_layers - 1:
            layer_indices = random.sample(layer_indices, self.n_layers_per_step)

        for layer_idx in layer_indices:
            # Recompute the base loss using the cached embeddings of this layer,
            # down-weighted for deeper layers and averaged over the sampled layers
            transformer_decorator.set_layer_idx(layer_idx)
            layer_loss = self.loss(sentence_features, labels)
            loss = loss + layer_loss / (1 + layer_idx) / len(layer_indices) * self.prior_layers_weight

            # KL-divergence loss that pushes this layer's sentence embeddings towards the final layer's
            if self.kl_temperature > 0:
                embeddings = forward_decorator.get_embeddings()
                kl_div_loss = F.kl_div(
                    F.log_softmax(embeddings / self.kl_temperature, dim=-1), final_embeddings, reduction="batchmean"
                )
                loss = loss + kl_div_loss * self.kl_div_weight

        # Restore the original forward functions
        self.model[0].forward = original_transformer_forward
        self.model.forward = original_forward
        return loss

    def get_config_dict(self) -> dict[str, Any]:
        return {
            "loss": self.loss.__class__.__name__,
            "n_layers_per_step": self.n_layers_per_step,
            "last_layer_weight": self.last_layer_weight,
            "prior_layers_weight": self.prior_layers_weight,
            "kl_div_weight": self.kl_div_weight,
            "kl_temperature": self.kl_temperature,
        }

    @property
    def citation(self) -> str:
        return """
@misc{li20242d,
    title={2D Matryoshka Sentence Embeddings},
    author={Xianming Li and Zongxi Li and Jing Li and Haoran Xie and Qing Li},
    year={2024},
    eprint={2402.14776},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
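
# Illustrative usage sketch (not part of the library): after training with AdaptiveLayerLoss,
# the transformer can be truncated to fewer layers for faster, cheaper inference. The attribute
# path `auto_model.encoder.layer` assumes a BERT/MPNet-style encoder; other architectures keep
# their layer stack under a different attribute. The checkpoint name and the choice of 4 layers
# below are hypothetical.
if __name__ == "__main__":
    model = SentenceTransformer("path/to/adaptive-layer-model")  # hypothetical checkpoint
    # Slicing an nn.ModuleList returns a ModuleList, so the truncated stack can be assigned back
    model[0].auto_model.encoder.layer = model[0].auto_model.encoder.layer[:4]
    embeddings = model.encode(["It's nice weather outside today.", "He drove to work."])
    print(embeddings.shape)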