
from typing import Any, Dict, List, Optional

from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field

DEFAULT_BGE_MODEL = "BAAI/bge-small-en-v1.5"
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
    "Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章："


class IpexLLMBgeEmbeddings(BaseModel, Embeddings):
    """Wrapper around the BGE embedding model
    with IPEX-LLM optimizations on Intel CPUs and GPUs.

    To use, you should have the ``ipex-llm``
    and ``sentence_transformers`` package installed. Refer to
    `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm/>`_
    for installation on Intel CPU.

    Example on Intel CPU:
        .. code-block:: python

            from langchain_community.embeddings import IpexLLMBgeEmbeddings

            embedding_model = IpexLLMBgeEmbeddings(
                model_name="BAAI/bge-large-en-v1.5",
                model_kwargs={},
                encode_kwargs={"normalize_embeddings": True},
            )

    Refer to
    `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm_gpu/>`_
    for installation on Intel GPU.

    Example on Intel GPU:
        .. code-block:: python

            from langchain_community.embeddings import IpexLLMBgeEmbeddings

            embedding_model = IpexLLMBgeEmbeddings(
                model_name="BAAI/bge-large-en-v1.5",
                model_kwargs={"device": "xpu"},
                encode_kwargs={"normalize_embeddings": True},
            )
    Nclient
model_namecache_folder)default_factorymodel_kwargsencode_kwargsquery_instruction embed_instructionkwargsc                 z    t                      j        di | 	 ddl}ddlm}m} n+# t          $ r}d}t          d| d| d          |d}~ww xY wd| j        vr
d	| j        d<   | j        d         d
vrt          d| j        d          d           |j	        | j
        fd| j        i| j        | _         || j                  | _         || j                  | _        | j        d         dk    r1| j                                                            d          | _        d| j
        v rt          | _        dS dS )z$Initialize the sentence_transformer.r   N)_optimize_post_optimize_prezChttps://python.langchain.com/v0.1/docs/integrations/text_embedding/zDCould not import ipex_llm or sentence_transformers. Please refer to zD/ipex_llm/ for install required packages on Intel CPU. And refer to z;/ipex_llm_gpu/ for install required packages on Intel GPU. devicecpu)r   xpuzXIpexLLMBgeEmbeddings currently only supports device to be 'cpu' or 'xpu', but you have: .r   r   z-zh )super__init__sentence_transformersipex_llm.transformers.convertr   r   ImportErrorr   
ValueErrorSentenceTransformerr   r   r   halfto DEFAULT_QUERY_BGE_INSTRUCTION_ZHr   )selfr   r!   r   r   excbase_url	__class__s          c/var/www/html/ai-engine/env/lib/python3.11/site-packages/langchain_community/embeddings/ipex_llm.pyr    zIpexLLMBgeEmbeddings.__init__C   s   ""6"""	((((SSSSSSSSS 
	 
	 
	U  ?#+? ? !)? ? ?  	
	 4,,,*/Dh'X&n<<P151B81LP P P  
 @+?O
 
*.*;
?C?P
 

 $mDK00$nT[11X&%//+**,,//66DKDO##%ED""" $#s   ) 
AAAforbidr   )extraprotected_namespacestextsreturnc                 x      fd|D             }  j         j        |fi  j        }|                                S )zCompute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        c                 L    g | ] }j         |                    d d          z   !S )
 )r   replace).0tr)   s     r-   
<listcomp>z8IpexLLMBgeEmbeddings.embed_documents.<locals>.<listcomp>x   s/    NNN1'!))D#*>*>>NNN    )r   encoder   tolist)r)   r1   
embeddingss   `  r-   embed_documentsz$IpexLLMBgeEmbeddings.embed_documentso   sP     ONNNNNN'T['DD1CDD
  """r;   textc                     |                     dd          } | j        j        | j        |z   fi | j        }|                                S )zCompute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        r5   r6   )r7   r   r<   r   r   r=   )r)   r@   	embeddings      r-   embed_queryz IpexLLMBgeEmbeddings.embed_query|   s\     ||D#&&&DK&"T)
 
-1-?
 
	 !!!r;   )__name__
__module____qualname____doc__r   r   __annotations__DEFAULT_BGE_MODELr   strr   r   r
   dictr   r   r    DEFAULT_QUERY_BGE_INSTRUCTION_ENr   r   r    r	   model_configr   floatr?   rC   __classcell__)r,   s   @r-   r   r      s{        ! !F FC'J'''"&L(3-&&&K#(5#>#>#>L$sCx.>>>1$)E$$?$?$?M4S>???R=s===1s4(F (F (F (F (F (F (FT :H2FFFL#T#Y #4U3D # # # #" "U " " " " " " " "r;   r   N)typingr   r   r   r   langchain_core.embeddingsr   pydanticr   r	   r
   rI   rL   r(   r   r   r;   r-   <module>rS      s    - , , , , , , , , , , , 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1, ? ! $_  y" y" y" y" y"9j y" y" y" y" y"r;   
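

# A minimal usage sketch, assuming the optional ipex-llm and
# sentence_transformers packages are installed and that the default
# "BAAI/bge-small-en-v1.5" model can be downloaded; the sample sentences
# below are illustrative only.
if __name__ == "__main__":
    embedder = IpexLLMBgeEmbeddings(
        model_name=DEFAULT_BGE_MODEL,
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )
    # embed_documents prefixes each text with embed_instruction (empty by
    # default); embed_query prefixes the text with query_instruction.
    doc_vectors = embedder.embed_documents(
        ["IPEX-LLM optimizes BGE embedding models on Intel hardware."]
    )
    query_vector = embedder.embed_query("How are BGE embeddings optimized?")
    print(f"document vectors: {len(doc_vectors)} x {len(doc_vectors[0])}")
    print(f"query vector length: {len(query_vector)}")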