
"""
This file contains the templating for model cards prior to the v3.0 release. It still exists to be used alongside
SentenceTransformer.old_fit for backwards compatibility, but will be removed in a future release.
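
A rough usage sketch (illustrative only; in practice ``SentenceTransformer.old_fit`` fills in these
templates itself, and the concrete values below are assumptions made up for this example)::

    from sentence_transformers.model_card_templates import ModelCardTemplate

    card = ModelCardTemplate.__MODEL_CARD__
    for placeholder, default in ModelCardTemplate.__DEFAULT_VARS__.items():
        card = card.replace(placeholder, default)
    card = card.replace("{MODEL_NAME}", "my-model").replace("{NUM_DIMENSIONS}", "768")

    pooling_name, pooling_code = ModelCardTemplate.model_card_get_pooling_function("mean")
    usage = ModelCardTemplate.__USAGE_TRANSFORMERS__.replace("{POOLING_FUNCTION}", pooling_code)
    usage = usage.replace("{POOLING_FUNCTION_NAME}", pooling_name).replace("{POOLING_MODE}", "mean")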
"""

from __future__ import annotations

import logging

from .util import fullname


class ModelCardTemplate:
    __TAGS__ = ["sentence-transformers", "feature-extraction", "sentence-similarity"]
    __DEFAULT_VARS__ = {
        "{PIPELINE_TAG}": "sentence-similarity",
        "{MODEL_DESCRIPTION}": "<!--- Describe your model here -->",
        "{TRAINING_SECTION}": "",
        "{USAGE_TRANSFORMERS_SECTION}": "",
        "{EVALUATION}": "<!--- Describe how your model was evaluated -->",
        "{CITING}": "<!--- Describe where people can find more information -->",
    }

    __MODEL_CARD__ = """
---
library_name: sentence-transformers
pipeline_tag: {PIPELINE_TAG}
tags:
{TAGS}
{DATASETS}
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a {NUM_DIMENSIONS}-dimensional dense vector space and can be used for tasks like clustering or semantic search.

{MODEL_DESCRIPTION}

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
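# encode() returns the embeddings as a numpy array by default, one row per input sentence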
print(embeddings)
```

{USAGE_TRANSFORMERS_SECTION}

## Evaluation Results

{EVALUATION}

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

{TRAINING_SECTION}

## Full Model Architecture
```
{FULL_MODEL_STR}
```

## Citing & Authors

{CITING}

"""

    __TRAINING_SECTION__ = """
## Training
The model was trained with the following parameters:

{LOSS_FUNCTIONS}

Parameters of the fit() method:
```
{FIT_PARAMETERS}
```
"""

    __USAGE_TRANSFORMERS__ = """

## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, you pass your input through the transformer model, then you apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

{POOLING_FUNCTION}

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, {POOLING_MODE} pooling.
sentence_embeddings = {POOLING_FUNCTION_NAME}(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

"""

    @staticmethod
    def model_card_get_pooling_function(pooling_mode):
        """Return the name and the source code of the pooling function to embed in the usage example."""
        if pooling_mode == "max":
            return (
                "max_pooling",
                """
# Max Pooling - Take the max value over time for every dimension.
def max_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    token_embeddings[input_mask_expanded == 0] = -1e9  # Set padding tokens to large negative value
    return torch.max(token_embeddings, 1)[0]
mean)mean_poolinga  
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
cls)cls_poolingzP
def cls_pooling(model_output, attention_mask):
    return model_output[0][:,0]
""",
            )

    @staticmethod
    def get_train_objective_info(dataloader, loss):
        """Describe one (DataLoader, loss) training objective as markdown fragments for the model card."""
        try:
            if hasattr(dataloader, "get_config_dict"):
                loader_params = dataloader.get_config_dict()
            else:
                loader_params = {}
                loader_params["batch_size"] = dataloader.batch_size if hasattr(dataloader, "batch_size") else "unknown"
                if hasattr(dataloader, "sampler"):
                    loader_params["sampler"] = fullname(dataloader.sampler)
                if hasattr(dataloader, "batch_sampler"):
                    loader_params["batch_sampler"] = fullname(dataloader.batch_sampler)

            dataloader_str = f"""**DataLoader**:\n\n`{fullname(dataloader)}` of length {len(dataloader)} with parameters:
```
{loader_params}
```"""

            loss_str = "**Loss**:\n\n`{}` {}".format(
                fullname(loss),
                f"""with parameters:
  ```
  {loss.get_config_dict()}
  ```"""
                if hasattr(loss, "get_config_dict")
                else "",
            )

            return [dataloader_str, loss_str]

        except Exception as e:
            logging.warning(f"Exception when creating get_train_objective_info: {str(e)}")
            return ""