from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, model_validator

logger = logging.getLogger(__name__)


class LlamaCpp(LLM):
    """llama.cpp model.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.llms import LlamaCpp
            llm = LlamaCpp(model_path="/path/to/llama/model")
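
            # Optionally constrain output with a formal GBNF grammar, e.g.
            # to force valid JSON (the .gbnf path shown is illustrative):
            llm = LlamaCpp(
                model_path="/path/to/llama/model",
                grammar_path="/path/to/json.gbnf",
            )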
    """

    client: Any = None  #: :meta private:

    model_path: str
    """The path to the Llama model file."""

    lora_base: Optional[str] = None
    """The path to the Llama LoRA base model."""

    lora_path: Optional[str] = None
    """The path to the Llama LoRA. If None, no LoRA is loaded."""

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(True, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use.
    If None, the number of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

    suffix: Optional[str] = Field(None)
    """A suffix to append to the generated text. If None, no suffix is appended."""

    max_tokens: Optional[int] = 256
    """The maximum number of tokens to generate."""

    temperature: Optional[float] = 0.8
    """The temperature to use for sampling."""

    top_p: Optional[float] = 0.95
    """The top-p value to use for sampling."""

    logprobs: Optional[int] = Field(None)
    """The number of logprobs to return. If None, no logprobs are returned."""

    echo: Optional[bool] = False
    """Whether to echo the prompt."""

    stop: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    repeat_penalty: Optional[float] = 1.1
    """The penalty to apply to repeated tokens."""

    top_k: Optional[int] = 40
    """The top-k value to use for sampling."""

    last_n_tokens_size: Optional[int] = 64
    """The number of tokens to look back when applying the repeat_penalty."""

    use_mmap: Optional[bool] = True
    """Whether to keep the model loaded in RAM."""

    rope_freq_scale: float = 1.0
    """Scale factor for rope sampling."""

    rope_freq_base: float = 10000.0
    """Base frequency for rope sampling."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Any additional parameters to pass to llama_cpp.Llama."""

    streaming: bool = True
    """Whether to stream the results, token by token."""

    grammar_path: Optional[Union[str, Path]] = None
    """Path to the .gbnf file that defines a formal grammar for constraining
    model outputs. For instance, the grammar can be used to force the model to
    generate valid JSON or to speak exclusively in emojis. At most one of
    grammar_path and grammar should be passed in."""

    grammar: Optional[Union[str, Any]] = None
    """Formal grammar for constraining model outputs. For instance, the
    grammar can be used to force the model to generate valid JSON or to speak
    exclusively in emojis. At most one of grammar_path and grammar should be
    passed in."""

    verbose: bool = True
    """Print verbose output to stderr."""

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        try:
            from llama_cpp import Llama, LlamaGrammar
        except ImportError:
            raise ImportError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )

        model_path = values["model_path"]
        model_param_names = [
            "rope_freq_scale",
            "rope_freq_base",
            "lora_path",
            "lora_base",
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
            "use_mmap",
            "last_n_tokens_size",
            "verbose",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include n_gpu_layers if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        model_params.update(values["model_kwargs"])

        try:
            values["client"] = Llama(model_path, **model_params)
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        if values["grammar"] and values["grammar_path"]:
            grammar = values["grammar"]
            grammar_path = values["grammar_path"]
            raise ValueError(
                "Can only pass in one of grammar and grammar_path. Received "
                f"{grammar=} and {grammar_path=}."
            )
        elif isinstance(values["grammar"], str):
            values["grammar"] = LlamaGrammar.from_string(values["grammar"])
        elif values["grammar_path"]:
            values["grammar"] = LlamaGrammar.from_file(values["grammar_path"])

        return values

    @model_validator(mode="before")
    @classmethod
    def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        values = _build_model_kwargs(values, all_required_field_names)
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling llama_cpp."""
        params = {
            "suffix": self.suffix,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "logprobs": self.logprobs,
            "echo": self.echo,
            # "stop_sequences" is the key convention among LLM classes;
            # _get_parameters renames it to the "stop" key llama_cpp expects.
            "stop_sequences": self.stop,
            "repeat_penalty": self.repeat_penalty,
            "top_k": self.top_k,
        }
        if self.grammar:
            params["grammar"] = self.grammar
        return params

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_path": self.model_path}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "llamacpp"

    def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Performs a sanity check, preparing parameters in the format needed by llama_cpp.

        Args:
            stop (Optional[List[str]]): List of stop sequences for llama_cpp.

        Returns:
            Dictionary containing the combined parameters.
        """
        if self.stop and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")

        params = self._default_params

        # llama_cpp expects the "stop" key, not "stop_sequences", so we rename it:
        params.pop("stop_sequences")

        # then set it as configured, or default to an empty list:
        params["stop"] = self.stop or stop or []

        return params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the Llama model and return the output.

        Args:
            prompt: The prompt to use for generation.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain_community.llms import LlamaCpp
                llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
                llm.invoke("This is a prompt.")
        """
        if self.streaming:
            # If streaming is enabled, use the _stream method that yields
            # chunks as they are generated and return the combined text:
            combined_text_output = ""
            for chunk in self._stream(
                prompt=prompt,
                stop=stop,
                run_manager=run_manager,
                **kwargs,
            ):
                combined_text_output += chunk.text
            return combined_text_output
        else:
            params = self._get_parameters(stop)
            params = {**params, **kwargs}
            result = self.client(prompt=prompt, **params)
            return result["choices"][0]["text"]

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            Dictionary-like objects containing a string token and metadata.
            See llama-cpp-python docs and below for more.

        Example:
            .. code-block:: python

                from langchain_community.llms import LlamaCpp
                llm = LlamaCpp(
                    model_path="/path/to/local/model.bin",
                    temperature=0.5
                )
                for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                        stop=["'","\n"]):
                    result = chunk["choices"][0]
                    print(result["text"], end='', flush=True)  # noqa: T201
        """
        params = {**self._get_parameters(stop), **kwargs}
        result = self.client(prompt=prompt, stream=True, **params)
        for part in result:
            logprobs = part["choices"][0].get("logprobs", None)
            chunk = GenerationChunk(
                text=part["choices"][0]["text"],
                generation_info={"logprobs": logprobs},
            )
            if run_manager:
                run_manager.on_llm_new_token(
                    token=chunk.text, verbose=self.verbose, log_probs=logprobs
                )
            yield chunk

    def get_num_tokens(self, text: str) -> int:
        tokenized_text = self.client.tokenize(text.encode("utf-8"))
        return len(tokenized_text)
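

# A minimal usage sketch (illustrative, not part of the library): exercises
# both documented code paths, the blocking `invoke` call (non-streaming
# `_call` branch) and token-by-token `stream` (which always uses `_stream`).
# The model path below is a placeholder; point it at any local
# llama.cpp-compatible weights.
if __name__ == "__main__":
    llm = LlamaCpp(
        model_path="/path/to/local/model.gguf",  # placeholder path
        temperature=0.5,
        max_tokens=64,
        streaming=False,  # take the non-streaming _call branch in invoke()
    )

    # Blocking call: returns the whole completion as one string.
    print(llm.invoke("Q: Name the planets in the solar system. A:"))

    # Streaming call: LLM.stream wraps _stream and yields string tokens
    # as llama.cpp produces them.
    for token in llm.stream("Q: What is a llama? A:", stop=["\n"]):
        print(token, end="", flush=True)
    print()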