
"""Ollama embeddings models."""

from typing import List, Optional

from langchain_core.embeddings import Embeddings
from ollama import AsyncClient, Client
from pydantic import BaseModel, ConfigDict, PrivateAttr, model_validator
from typing_extensions import Self


class OllamaEmbeddings(BaseModel, Embeddings):
    """Ollama embedding model integration.

    Set up a local Ollama instance:
        Install the Ollama package and set up a local Ollama instance
        using the instructions here: https://github.com/ollama/ollama .

        You will need to choose a model to serve.

        You can view a list of available models via the model library (https://ollama.com/library).

        To fetch a model from the Ollama model library use ``ollama pull <name-of-model>``.

        For example, to pull the llama3 model:

        .. code-block:: bash

            ollama pull llama3

        This will download the default tagged version of the model.
        Typically, the default points to the latest, smallest sized-parameter model.

        * On Mac, the models will be downloaded to ~/.ollama/models
        * On Linux (or WSL), the models will be stored at /usr/share/ollama/.ollama/models

        You can specify the exact version of the model of interest
        as such ``ollama pull vicuna:13b-v1.5-16k-q4_0``.

        To view pulled models:

        .. code-block:: bash

            ollama list

        To start serving:

        .. code-block:: bash

            ollama serve

        View the Ollama documentation for more commands.

        .. code-block:: bash

            ollama help

    Install the langchain-ollama integration package:
        .. code-block:: bash

            pip install -U langchain_ollama

    Key init args — completion params:
        model: str
            Name of Ollama model to use.
        base_url: Optional[str]
            Base url the model is hosted under.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_ollama import OllamaEmbeddings

            embed = OllamaEmbeddings(
                model="llama3"
            )
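
        To target a non-default server, set ``base_url``; ``client_kwargs``
        is forwarded to the underlying ``ollama`` client. The address below
        is the conventional local Ollama endpoint and is only illustrative:

        .. code-block:: python

            embed = OllamaEmbeddings(
                model="llama3",
                base_url="http://localhost:11434",
            )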

    Embed single text:
        .. code-block:: python

            input_text = "The meaning of life is 42"
            vector = embed.embed_query(input_text)
            print(vector[:3])

        .. code-block:: python

            [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]

    Embed multiple texts:
        .. code-block:: python

            input_texts = ["Document 1...", "Document 2..."]
            vectors = embed.embed_documents(input_texts)
            print(len(vectors))
            # The first 3 coordinates for the first vector
            print(vectors[0][:3])

        .. code-block:: python

            2
            [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]

    Async:
        .. code-block:: python

            vector = await embed.aembed_query(input_text)
            print(vector[:3])

            # multiple:
            # await embed.aembed_documents(input_texts)

        .. code-block:: python

            [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
    modelNbase_urlclient_kwargs)default_client_async_clientforbid)extraafter)modereturnc                 x    | j         pi }t          dd| j        i|| _        t	          dd| j        i|| _        | S )zSet clients to use for ollama.host )r   r   r   r   r   r   )selfr   s     W/var/www/html/ai-engine/env/lib/python3.11/site-packages/langchain_ollama/embeddings.py_set_clientszOllamaEmbeddings._set_clients   sO     *0bBB4=BMBB(MMdmM}MM    textsc                 R    | j                             | j        |          d         }|S )Embed search docs.
embeddings)r   embedr   r   r!   embedded_docss      r   embed_documentsz OllamaEmbeddings.embed_documents   s&    **4:u==lKr    textc                 :    |                      |g          d         S )Embed query text.r   )r(   r   r)   s     r   embed_queryzOllamaEmbeddings.embed_query   s    ##TF++A..r    c                 b   K   | j                             | j        |           d{V d         }|S )r#   Nr$   )r   r%   r   r&   s      r   aembed_documentsz!OllamaEmbeddings.aembed_documents   sB      #177
EJJJJJJJJ
 r    c                 J   K   |                      |g           d{V d         S )r+   Nr   )r/   r,   s     r   aembed_queryzOllamaEmbeddings.aembed_query   s3      ++TF33333333Q77r    )__name__
__module____qualname____doc__str__annotations__r   r   r   dictr
   r   r   r   r   r	   model_configr   r   r   r   floatr(   r-   r/   r1   r   r    r   r   r      s        h hT JJJ"Hhsm"""-$&M8D>&&& "k$///GV/// "-T!:!:!:M;::: :  L _'"""d    #"T#Y 4U3D    
/ /U / / / /DI $tE{:K    8s 8tE{ 8 8 8 8 8 8r    r   N)r5   typingr   r   langchain_core.embeddingsr   ollamar   r   pydanticr   r	   r
   r   typing_extensionsr   r   r   r    r   <module>r@      s            
 1 0 0 0 0 0 & & & & & & & &            # " " " " "^8 ^8 ^8 ^8 ^8y* ^8 ^8 ^8 ^8 ^8r    