import json
from operator import itemgetter
from typing import (
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
    cast,
)

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
    JsonOutputKeyToolsParser,
    PydanticToolsParser,
    make_invalid_tool_call,
    parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field, SecretStr
from requests import Response


def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
    """
    Convert a BaseMessage to a dictionary with Role / content

    Args:
        message: BaseMessage

    Returns:
        messages_dict:  role / content dict
    """
    message_dict: Dict[str, Any] = {}
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
        if "tool_calls" in message.additional_kwargs:
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            if message_dict["content"] == "":
                message_dict["content"] = None
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
        }
    else:
        raise TypeError(f"Got unknown type {message}")
    return message_dict


def _create_message_dicts(messages: List[BaseMessage]) -> List[Dict[str, Any]]:
    """
    Convert a list of BaseMessages to a list of dictionaries with Role / content

    Args:
        messages: list of BaseMessages

    Returns:
        messages_dicts:  list of role / content dicts
    """
    message_dicts = [_convert_message_to_dict(m) for m in messages]
    return message_dicts


def _is_pydantic_class(obj: Any) -> bool:
    return isinstance(obj, type) and is_basemodel_subclass(obj)
d4Z8 xZ9S )8ChatSambaNovaCloudu  
    SambaNova Cloud chat model.

    Setup:
        To use, you should have the environment variables:
        `SAMBANOVA_URL` set with your SambaNova Cloud URL.
        `SAMBANOVA_API_KEY` set with your SambaNova Cloud API Key.
        https://cloud.sambanova.ai/
        Example:
        .. code-block:: python
            ChatSambaNovaCloud(
                sambanova_url = SambaNova cloud endpoint URL,
                sambanova_api_key = set with your SambaNova cloud API key,
                model = model name,
                max_tokens = max number of tokens to generate,
                temperature = model temperature,
                top_p = model top p,
                top_k = model top k,
                stream_options = include usage to get generation metrics
            )

    Key init args — completion params:
        model: str
            The name of the model to use, e.g., Meta-Llama-3-70B-Instruct.
        streaming: bool
            Whether to use a streaming handler when using non-streaming methods
        max_tokens: int
            max tokens to generate
        temperature: float
            model temperature
        top_p: float
            model top p
        top_k: int
            model top k
        stream_options: dict
            stream options, include usage to get generation metrics

    Key init args — client params:
        sambanova_url: str
            SambaNova Cloud Url
        sambanova_api_key: str
            SambaNova Cloud api key

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import ChatSambaNovaCloud

            chat = ChatSambaNovaCloud(
                sambanova_url = SambaNova cloud endpoint URL,
                sambanova_api_key = set with your SambaNova cloud API key,
                model = model name,
                max_tokens = max number of tokens to generate,
                temperature = model temperature,
                top_p = model top p,
                top_k = model top k,
                stream_options = include usage to get generation metrics
            )

    Invoke:
        .. code-block:: python

            messages = [
                SystemMessage(content="your are an AI assistant."),
                HumanMessage(content="tell me a joke."),
            ]
            response = chat.invoke(messages)

    Stream:
        .. code-block:: python

            for chunk in chat.stream(messages):
                print(chunk.content, end="", flush=True)

    Async:
        .. code-block:: python

            response = await chat.ainvoke(messages)
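
        A short sketch of async streaming, using the ``astream`` method
        inherited from ``BaseChatModel``:

        .. code-block:: python

            async for chunk in chat.astream(messages):
                print(chunk.content, end="", flush=True)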

    Tool calling:
        .. code-block:: python

            from pydantic import BaseModel, Field

            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(
                    ...,
                    description="The city and state, e.g. Los Angeles, CA"
                )

            llm_with_tools = llm.bind_tools([GetWeather])
            ai_msg = llm_with_tools.invoke("Should I bring my umbrella today in LA?")
            ai_msg.tool_calls

        .. code-block:: none

            [
                {
                    'name': 'GetWeather',
                    'args': {'location': 'Los Angeles, CA'},
                    'id': 'call_adf61180ea2b4d228a'
                }
            ]
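
        A minimal sketch of completing the tool-call loop (``get_weather``
        is a hypothetical executor, not part of this module):

        .. code-block:: python

            from langchain_core.messages import HumanMessage, ToolMessage

            tool_call = ai_msg.tool_calls[0]
            result = get_weather(**tool_call["args"])  # hypothetical helper
            messages = [
                HumanMessage(content="Should I bring my umbrella today in LA?"),
                ai_msg,
                ToolMessage(content=result, tool_call_id=tool_call["id"]),
            ]
            final_answer = llm_with_tools.invoke(messages)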

    Structured output:
        .. code-block:: python

            from typing import Optional

            from pydantic import BaseModel, Field

            class Joke(BaseModel):
                '''Joke to tell user.'''

                setup: str = Field(description="The setup of the joke")
                punchline: str = Field(description="The punchline to the joke")

            structured_model = llm.with_structured_output(Joke)
            structured_model.invoke("Tell me a joke about cats")

        .. code-block:: python

            Joke(setup="Why did the cat join a band?",
            punchline="Because it wanted to be the purr-cussionist!")

        See `ChatSambaNovaCloud.with_structured_output()` for more.

    Token usage:
        .. code-block:: python

            response = chat.invoke(messages)
            print(response.response_metadata["usage"]["prompt_tokens"]
            print(response.response_metadata["usage"]["total_tokens"]

    Response metadata:
        .. code-block:: python

            response = chat.invoke(messages)
            print(response.response_metadata)
    """

    sambanova_url: str = Field(default="")
    """SambaNova Cloud Url"""

    sambanova_api_key: SecretStr = Field(default=SecretStr(""))
    """SambaNova Cloud api key"""

    model: str = Field(default="Meta-Llama-3.1-8B-Instruct")
    """The name of the model"""

    streaming: bool = Field(default=False)
    """Whether to use streaming handler"""

    max_tokens: int = Field(default=1024)
    """max tokens to generate"""

    temperature: float = Field(default=0.7)
    """model temperature"""

    top_p: Optional[float] = Field(default=None)
    """model top p"""

    top_k: Optional[int] = Field(default=None)
    """model top k"""

    stream_options: Dict[str, Any] = Field(default={"include_usage": True})
    """stream options, include usage to get generation metrics"""

    additional_headers: Dict[str, Any] = Field(default={})
    """additional headers to send in the request"""

    class Config:
        populate_by_name = True

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"sambanova_api_key": "sambanova_api_key"}

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters.

        This information is used by the LangChain callback system, which
        is used for tracing purposes and makes it possible to monitor LLMs.
        """
        return {
            "model": self.model,
            "streaming": self.streaming,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "stream_options": self.stream_options,
        }

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model."""
        return "sambanovacloud-chatmodel"

    def __init__(self, **kwargs: Any) -> None:
        """init and validate environment variables"""
        kwargs["sambanova_url"] = get_from_dict_or_env(
            kwargs,
            "sambanova_url",
            "SAMBANOVA_URL",
            default="https://api.sambanova.ai/v1/chat/completions",
        )
        kwargs["sambanova_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(kwargs, "sambanova_api_key", "SAMBANOVA_API_KEY")
        )
        super().__init__(**kwargs)

    def bind_tools(
        self,
        tools: Sequence[
            Union[Dict[str, Any], Type[BaseModel], Callable[..., Any], BaseTool]
        ],
        *,
        tool_choice: Optional[Union[Dict[str, Any], str, bool]] = None,
        parallel_tool_calls: Optional[bool] = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model

        tool_choice: does not currently support "any"; the choice
            should be one of ["auto", "none", "required"]
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice:
            if isinstance(tool_choice, str):
                if tool_choice not in ("auto", "none", "required"):
                    tool_choice = "auto"
            elif isinstance(tool_choice, bool):
                if tool_choice:
                    tool_choice = "required"
            elif isinstance(tool_choice, dict):
                raise ValueError(
                    "tool_choice must be one of ['auto', 'none', 'required']"
                )
            else:
                raise ValueError(
                    "Unrecognized tool_choice type. Expected str, bool"
                    f"Received: {tool_choice}"
                )
        else:
            tool_choice = "auto"
        kwargs["tool_choice"] = tool_choice
        kwargs["parallel_tool_calls"] = parallel_tool_calls
        return super().bind(tools=formatted_tools, **kwargs)

    def with_structured_output(
        self,
        schema: Optional[Union[Dict[str, Any], Type[BaseModel]]] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict[str, Any], BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:
                    - an OpenAI function/tool schema,
                    - a JSON Schema,
                    - a TypedDict class,
                    - or a Pydantic.BaseModel class.
                If `schema` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method:
                The method for steering model generation, either "function_calling"
                "json_mode" or "json_schema".
                If "function_calling" then the schema will be converted
                to an OpenAI function and the returned model will make use of the
                function-calling API. If "json_mode" or "json_schema" then OpenAI's
                JSON mode will be used.
                Note that if using "json_mode" or "json_schema" then you must include instructions
                for formatting the output into the desired schema into the model call.

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
            an instance of `schema` (i.e., a Pydantic object).

            Otherwise, if `include_raw` is False then Runnable outputs a dict.

            If `include_raw` is True, then Runnable outputs a dict with keys:
                - `"raw"`: BaseMessage
                - `"parsed"`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
                - `"parsing_error"`: Optional[BaseException]

        Example: schema=Pydantic class, method="function_calling", include_raw=False:
            .. code-block:: python

                from typing import Optional

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str = Field(
                        description="A justification for the answer."
                    )


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same.'
                # )

        Example: schema=Pydantic class, method="function_calling", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'arguments': '{"answer": "They weigh the same.", "justification": "A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount."}', 'name': 'AnswerWithJustification'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls', 'usage': {'acceptance_rate': 5, 'completion_tokens': 53, 'completion_tokens_after_first_per_sec': 343.7964936837758, 'completion_tokens_after_first_per_sec_first_ten': 439.1205661878638, 'completion_tokens_per_sec': 162.8511306784833, 'end_time': 1731527851.0698032, 'is_last_response': True, 'prompt_tokens': 213, 'start_time': 1731527850.7137961, 'time_to_first_token': 0.20475482940673828, 'total_latency': 0.32545061111450196, 'total_tokens': 266, 'total_tokens_per_sec': 817.3283162354066}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731527850}, id='95667eaf-447f-4b53-bb6e-b6e1094ded88', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'They weigh the same.', 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'tool_call'}]),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'),
                #     'parsing_error': None
                # }

        Example: schema=TypedDict class, method="function_calling", include_raw=False:
            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing_extensions import Annotated, TypedDict

                from langchain_community.chat_models import ChatSambaNovaCloud


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }

        Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaNovaCloud

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                       'required': ['answer']
                   }
                }

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }

        Example: schema=Pydantic class, method="json_mode", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification,
                    method="json_mode",
                    include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_mode", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 4.722222222222222, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 357.1315485254867, 'completion_tokens_after_first_per_sec_first_ten': 416.83279609305305, 'completion_tokens_per_sec': 240.92819585198137, 'end_time': 1731528164.8474727, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528164.4906917, 'time_to_first_token': 0.13837409019470215, 'total_latency': 0.3278985247892492, 'total_tokens': 149, 'total_tokens_per_sec': 454.4088757208256}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528164}, id='15261eaf-8a25-42ef-8ed5-f63d8bf5b1b0'),
                #     'parsed': {
                #         'answer': 'They are the same weight',
                #         'justification': 'A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'},
                #     },
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_schema", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification, method="json_schema", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }
        """
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "`schema` must be specified when method is `function_calling`. "
                    "Received None."
                )
            tool_name = convert_to_openai_tool(schema)["function"]["name"]
            llm = self.bind_tools([schema], tool_choice=tool_name)
            if is_pydantic_schema:
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[cast(Type[BaseModel], schema)],
                    first_tool_only=True,
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self
            if is_pydantic_schema:
                schema = cast(Type[BaseModel], schema)
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                output_parser = JsonOutputParser()
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "`schema` must be specified when method is not `json_mode`. "
                    "Received None."
                )
            llm = self
            if is_pydantic_schema:
                schema = cast(Type[BaseModel], schema)
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                output_parser = JsonOutputParser()
        else:
            raise ValueError(
                "Unrecognized method argument. Expected one of `function_calling` "
                f"or `json_mode`. Received: `{method}`"
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser

    def _handle_request(
        self,
        messages_dicts: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        **kwargs: Any,
    ) -> Response:
        """
        Performs a post request to the LLM API.

        Args:
            messages_dicts: List of role / content dicts to use as input.
            stop: list of stop tokens
            streaming: whether to do a streaming call

        Returns:
            A request Response object
        """
        if streaming:
            data = {
                "messages": messages_dicts,
                "max_tokens": self.max_tokens,
                "stop": stop,
                "model": self.model,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "stream": True,
                "stream_options": self.stream_options,
                **kwargs,
            }
        else:
            data = {
                "messages": messages_dicts,
                "max_tokens": self.max_tokens,
                "stop": stop,
                "model": self.model,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                **kwargs,
            }
        http_session = requests.Session()
        response = http_session.post(
            self.sambanova_url,
            headers={
                "Authorization": f"Bearer "
                f"{self.sambanova_api_key.get_secret_value()}",
                "Content-Type": "application/json",
                **self.additional_headers,
            },
            json=data,
            stream=streaming,
        )
        if response.status_code != 200:
            raise RuntimeError(
                f"Sambanova /complete call failed with status code "
                f"{response.status_code}.\n{response.text}."
            )
        return response

    def _process_response(self, response: Response) -> AIMessage:
        """
        Process a non-streaming response from the API

        Args:
            response: A request Response object

        Returns:
            generation: an AIMessage with model generation
        """
        try:
            response_dict = response.json()
            if response_dict.get("error"):
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response.status_code}.\n{response_dict}."
                )
        except Exception as e:
            raise RuntimeError(
                f"Sambanova /complete call failed couldn't get JSON response {e}"
                f"\nresponse: {response.text}"
            )
        content = response_dict["choices"][0]["message"].get("content", "")
        if content is None:
            content = ""
        additional_kwargs: Dict[str, Any] = {}
        tool_calls = []
        invalid_tool_calls = []
        raw_tool_calls = response_dict["choices"][0]["message"].get("tool_calls")
        if raw_tool_calls:
            additional_kwargs["tool_calls"] = raw_tool_calls
            for raw_tool_call in raw_tool_calls:
                if isinstance(raw_tool_call["function"]["arguments"], dict):
                    raw_tool_call["function"]["arguments"] = json.dumps(
                        raw_tool_call["function"].get("arguments", {})
                    )
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )
        message = AIMessage(
            content=content,
            additional_kwargs=additional_kwargs,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
            response_metadata={
                "finish_reason": response_dict["choices"][0]["finish_reason"],
                "usage": response_dict.get("usage"),
                "model_name": response_dict["model"],
                "system_fingerprint": response_dict["system_fingerprint"],
                "created": response_dict["created"],
            },
            id=response_dict["id"],
        )
        return message

    def _process_stream_response(
        self, response: Response
    ) -> Iterator[BaseMessageChunk]:
        """
        Process a streaming response from the API

        Args:
            response: An iterable request Response object

        Yields:
            generation: an AIMessageChunk with model partial generation
        """
        try:
            import sseclient
        except ImportError:
            raise ImportError(
                "could not import sseclient library"
                "Please install it with `pip install sseclient-py`."
            )
        client = sseclient.SSEClient(response)
        finish_reason = None
        for event in client.events():
            if event.event == "error_event":
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response.status_code}.\n{event.data}."
                )
            try:
                # a final event carries the literal '[DONE]' payload
                if event.data != "[DONE]":
                    if isinstance(event.data, str):
                        data = json.loads(event.data)
                    else:
                        raise RuntimeError(
                            f"Sambanova /complete call failed with status code "
                            f"{response.status_code}.\n{event.data}."
                        )
                    if data.get("error"):
                        raise RuntimeError(
                            f"Sambanova /complete call failed with status code "
                            f"{response.status_code}.\n{event.data}."
                        )
                    if len(data["choices"]) > 0:
                        finish_reason = data["choices"][0].get("finish_reason")
                        content = data["choices"][0]["delta"]["content"]
                        id = data["id"]
                        chunk = AIMessageChunk(
                            content=content, id=id, additional_kwargs={}
                        )
                    else:
                        # usage-only chunk: carry generation metrics in metadata
                        content = ""
                        id = data["id"]
                        metadata = {
                            "finish_reason": finish_reason,
                            "usage": data.get("usage"),
                            "model_name": data["model"],
                            "system_fingerprint": data["system_fingerprint"],
                            "created": data["created"],
                        }
                        chunk = AIMessageChunk(
                            content=content,
                            id=id,
                            response_metadata=metadata,
                            additional_kwargs={},
                        )
                    yield chunk
            except Exception as e:
                raise RuntimeError(
                    f"Error getting content chunk raw streamed response: {e}"
                    f"\ndata: {event.data}"
                )

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """
        Call SambaNovaCloud models.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow since
                  it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Returns:
            result: ChatResult with model generation
        """
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            if stream_iter:
                return generate_from_stream(stream_iter)
        messages_dicts = _create_message_dicts(messages)
        response = self._handle_request(messages_dicts, stop, streaming=False, **kwargs)
        message = self._process_response(response)
        generation = ChatGeneration(
            message=message,
            generation_info={
                "finish_reason": message.response_metadata["finish_reason"]
            },
        )
        return ChatResult(generations=[generation])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """
        Stream the output of the SambaNovaCloud chat model.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow since
                  it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Yields:
            chunk: ChatGenerationChunk with model partial generation
        """
        messages_dicts = _create_message_dicts(messages)
        response = self._handle_request(messages_dicts, stop, streaming=True, **kwargs)
        for ai_message_chunk in self._process_stream_response(response):
            chunk = ChatGenerationChunk(message=ai_message_chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk


class ChatSambaStudio(BaseChatModel):
    """
    SambaStudio chat model.

    Setup:
        To use, you should have the environment variables:
        ``SAMBASTUDIO_URL`` set with your SambaStudio deployed endpoint URL.
        ``SAMBASTUDIO_API_KEY`` set with your SambaStudio deployed endpoint Key.
        https://docs.sambanova.ai/sambastudio/latest/index.html
        Example:
        .. code-block:: python
            ChatSambaStudio(
                sambastudio_url = set with your SambaStudio deployed endpoint URL,
                sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
                model = model or expert name (set for CoE endpoints),
                max_tokens = max number of tokens to generate,
                temperature = model temperature,
                top_p = model top p,
                top_k = model top k,
                do_sample = whether to do sampling
                process_prompt = whether to process prompt
                    (set for CoE generic v1 and v2 endpoints)
                stream_options = include usage to get generation metrics
                special_tokens = start, start_role, end_role, end special tokens
                    (set for CoE generic v1 and v2 endpoints when process prompt
                     set to false or for StandAlone v1 and v2 endpoints)
                model_kwargs: Optional = Extra Key word arguments to pass to the model.
            )

    Key init args — completion params:
        model: str
            The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
            (set for CoE endpoints).
        streaming: bool
            Whether to use a streaming handler when using non-streaming methods
        max_tokens: int
            max tokens to generate
        temperature: float
            model temperature
        top_p: float
            model top p
        top_k: int
            model top k
        do_sample: bool
            whether to do sampling
        process_prompt: bool
            whether to process prompt (set for CoE generic v1 and v2 endpoints)
        stream_options: dict
            stream options, include usage to get generation metrics
        special_tokens: dict
            start, start_role, end_role and end special tokens
            (set for CoE generic v1 and v2 endpoints when process prompt set to false
             or for StandAlone v1 and v2 endpoints) default to llama3 special tokens
        model_kwargs: dict
            Extra Key word arguments to pass to the model.

    Key init args — client params:
        sambastudio_url: str
            SambaStudio endpoint Url
        sambastudio_api_key: str
            SambaStudio endpoint api key

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import ChatSambaStudio

            chat = ChatSambaStudio(
                sambastudio_url = set with your SambaStudio deployed endpoint URL,
                sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
                model = model or expert name (set for CoE endpoints),
                max_tokens = max number of tokens to generate,
                temperature = model temperature,
                top_p = model top p,
                top_k = model top k,
                do_sample = whether to do sampling
                process_prompt = whether to process prompt
                    (set for CoE generic v1 and v2 endpoints)
                stream_options = include usage to get generation metrics
                special_tokens = start, start_role, end_role, end special tokens
                    (set for CoE generic v1 and v2 endpoints when process prompt
                     set to false or for StandAlone v1 and v2 endpoints)
                model_kwargs: Optional = Extra Key word arguments to pass to the model.
            )
    Invoke:
        .. code-block:: python
            messages = [
                SystemMessage(content="your are an AI assistant."),
                HumanMessage(content="tell me a joke."),
            ]
            response = chat.invoke(messages)

    Stream:
        .. code-block:: python

        for chunk in chat.stream(messages):
            print(chunk.content, end="", flush=True)

    Async:
        .. code-block:: python

        response = await chat.ainvoke(messages)

    Token usage:
        .. code-block:: python
        response = chat.invoke(messages)
        print(response.response_metadata["usage"]["prompt_tokens"]
        print(response.response_metadata["usage"]["total_tokens"]

    Response metadata:
        .. code-block:: python

        response = chat.invoke(messages)
        print(response.response_metadata)
    """

    sambastudio_url: str = Field(default="")
    """SambaStudio Url"""

    sambastudio_api_key: SecretStr = Field(default=SecretStr(""))
    """SambaStudio api key"""

    base_url: str = Field(default="", exclude=True)
    """SambaStudio non streaming Url"""

    streaming_url: str = Field(default="", exclude=True)
    """SambaStudio streaming Url"""

    model: Optional[str] = Field(default=None)
    """the name of the model or expert to use (set for CoE endpoints)"""

    streaming: bool = Field(default=False)
    """Whether to use streaming handler"""

    max_tokens: int = Field(default=1024)
    """max tokens to generate"""

    temperature: Optional[float] = Field(default=0.7)
    """model temperature"""

    top_p: Optional[float] = Field(default=None)
    """model top p"""

    top_k: Optional[int] = Field(default=None)
    """model top k"""

    do_sample: Optional[bool] = Field(default=None)
    """whether to do sampling"""

    process_prompt: Optional[bool] = Field(default=True)
    """whether to process prompt (set for CoE generic v1 and v2 endpoints)"""

    stream_options: Dict[str, Any] = Field(default={"include_usage": True})
    """stream options, include usage to get generation metrics"""

    special_tokens: Dict[str, Any] = Field(
        default={
            "start": "<|begin_of_text|>",
            "start_role": "<|begin_of_text|><|start_header_id|>{role}<|end_header_id|>",
            "end_role": "<|eot_id|>",
            "end": "<|start_header_id|>assistant<|end_header_id|>\n",
        }
    )
    """start, start_role, end_role and end special tokens (set for CoE generic v1
    and v2 endpoints when process prompt set to false or for StandAlone v1 and v2
    endpoints); default to llama3 special tokens"""

    model_kwargs: Optional[Dict[str, Any]] = None
    """Key word arguments to pass to the model."""

    class Config:
        populate_by_name = True

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {
            "sambastudio_url": "sambastudio_url",
            "sambastudio_api_key": "sambastudio_api_key",
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters.

        This information is used by the LangChain callback system, which
        is used for tracing purposes and makes it possible to monitor LLMs.
        """
        return {
            "model": self.model,
            "streaming": self.streaming,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "do_sample": self.do_sample,
            "process_prompt": self.process_prompt,
            "stream_options": self.stream_options,
            "special_tokens": self.special_tokens,
            "model_kwargs": self.model_kwargs,
        }

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model."""
        return "sambastudio-chatmodel"

    def __init__(self, **kwargs: Any) -> None:
        """init and validate environment variables"""
        kwargs["sambastudio_url"] = get_from_dict_or_env(
            kwargs, "sambastudio_url", "SAMBASTUDIO_URL"
        )
        kwargs["sambastudio_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(kwargs, "sambastudio_api_key", "SAMBASTUDIO_API_KEY")
        )
        kwargs["base_url"], kwargs["streaming_url"] = self._get_sambastudio_urls(
            kwargs["sambastudio_url"]
        )
        super().__init__(**kwargs)

    def _get_role(self, message: BaseMessage) -> str:
        """
        Get the role of LangChain BaseMessage

        Args:
            message: LangChain BaseMessage

        Returns:
            str: Role of the LangChain BaseMessage
        """
        if isinstance(message, ChatMessage):
            role = message.role
        elif isinstance(message, SystemMessage):
            role = "system"
        elif isinstance(message, HumanMessage):
            role = "user"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, ToolMessage):
            role = "tool"
        else:
            raise TypeError(f"Got unknown type {message}")
        return role

    def _messages_to_string(self, messages: List[BaseMessage]) -> str:
        """
        Convert a list of BaseMessages to a:
        - dumped json string with Role / content dict structure
            when process_prompt is true,
        - string with special tokens if process_prompt is false
        for generic V1 and V2 endpoints

        Args:
            messages: list of BaseMessages

        Returns:
            str: string to send as model input depending on process_prompt param
        """
        if self.process_prompt:
            messages_dict: Dict[str, Any] = {
                "conversation_id": "sambaverse-conversation-id",
                "messages": [],
            }
            for message in messages:
                messages_dict["messages"].append(
                    {
                        "message_id": message.id,
                        "role": self._get_role(message),
                        "content": message.content,
                    }
                )
            messages_string = json.dumps(messages_dict)
        else:
            messages_string = self.special_tokens["start"]
            for message in messages:
                messages_string += self.special_tokens["start_role"].format(
                    role=self._get_role(message)
                )
                messages_string += f" {message.content} "
                messages_string += self.special_tokens["end_role"]
            messages_string += self.special_tokens["end"]
        return messages_string

    def _get_sambastudio_urls(self, url: str) -> Tuple[str, str]:
        """

        Args:
            url: string with sambastudio base or streaming endpoint url

        Returns:
            base_url: string with url to do non streaming calls
            streaming_url: string with url to do streaming calls
        openair   zstream/r<   genericzgeneric/streamUnsupported URL)replacejoinsplitr   )rw   r=  r  
stream_urls       rD   r2  z%ChatSambaStudio._get_sambastudio_urls  s     s??HJJ3;;y"55 

##!1!6!6syy7K7K!L!LJJ$%6777##rF   r   c           
         d| j         v rxt          |          }|| j        || j        | j        | j        | j        || j        d	}d |                                D             }d| j	        
                                 dd}nnd| j         v rd|                     |          d	g}| j        | j        | j        | j        | j        | j        | j        d
}| j        i || j        }d |                                D             }||d}d| j	        
                                i}nd| j         v r| j        | j        | j        | j        | j        | j        | j        d
}| j        i || j        }d |                                D             }|r|                     |          |d}n|                     |          g|d}d| j	        
                                i}nt          d| j          d          t!          j                    }	|r|	                    | j        ||d          }
n|	                    | j        ||d          }
|
j        dk    r t-          d|
j         d|
j         d          |
S )a  
        Performs a post request to the LLM API.

        Args:
        messages: list of BaseMessages to use as input.
        stop: list of stop tokens
        streaming: whether to do a streaming call

        Returns:
            A request Response object
        r?  r   c                     i | ]
\  }}|||S rR   rJ   rK   keyvalues      rD   
<dictcomp>z3ChatSambaStudio._handle_request.<locals>.<dictcomp>  s#    SSS:3ARCARARARrF   r   r   r   api/v2/predict/genericitem0)r   rJ  )select_expertr!  max_tokens_to_generater`   ra   rb   r   Nc                     i | ]
\  }}|||S rR   rJ   rH  s      rD   rK  z3ChatSambaStudio._handle_request.<locals>.<dictcomp>-  s#    WWWZS%UEVc5EVEVEVrF   )itemsparamsrI  api/predict/genericc                 b    i | ],\  }}||t          |          j        t          |          d-S )N)rS   rJ  )rS   ri   r   rH  s      rD   rK  z3ChatSambaStudio._handle_request.<locals>.<dictcomp>>  sE       C$ d5kk2SZZHH$$$rF   )instancerR  )	instancesrR  rA  9only openai, generic v1 and generic v2 APIs are supportedTr   Fr   r   r   )r  rO   r^   r[   r`   ra   rb   rd   rQ  r  r   r<  r!  r   r'  r   r   r   r   r  r  r   r   r   )rw   rG   r   r\   r   r   r   rQ  rR  r   r   s              rD   r   zChatSambaStudio._handle_request  sB   & t+++28<<N*"o#/#"&"5
 
D TSSSSD"A+<<>>"A "A 2 GG &)===#d.F.Fx.P.PQQRE!%"&"5*./#/!^& &F  ,8F8d&78WW6<<>>WWWF"f55Dd6GGIIJGG #d&:::!%"&"5*./#/!^ F  ,8F8d&78 "(,,..  F
  	 $ 8 8 B B$  #'":":8"D"D!E$  d6GGIIJGG L$"6 L L L  
  ')) 	#(("G$t )  HH $((wT% )  H 3&&$'$ $=$ $ $  
 rF   r   c                    	 |                                 }n,# t          $ r}t          d| d|j                   d}~ww xY wd| j        v rb|d         d         d         d         }|d	         }|d         d         d
         |                    d          |d         |d         |d         d}nd| j        v r=|d         d         d         d         }|d         d         d	         }|d         d         }n:d| j        v r|d         d         d         }d}|}nt          d| j         d          t          |i ||          S )r   r   r   Nr?  r   r   r3   r7   r   r   r   r[   r   r   r   rL  rQ  rJ  
completionrS  predictionsrA  rW  )r7   rA   r   r   )r   r   r   r   r  r   r   r   )rw   r   r   r   r7   r   r   s          rD   r   z!ChatSambaStudio._process_responsef  s   	$MMOOMM 	 	 	-a - -%]- -  	 t+++#I.q1)<YGGt$B!.y!9!!<_!M&**733+G4&34H&I(3! ! &)===#G,Q/8FGw'*40B -g 6q 9 #d&:::#M215lCGB - L$"6 L L L  
  /	
 
 
 	
s    
A ;A c              #     K   	 ddl }n# t          $ r t          d          w xY wd| j        v rd}|                    |          }|                                D ]}|j        dk    r t          d|j         d|j         d          	 |j        d	k    rt          |j        t                    rt          j        |j                  }n t          d|j         d|j         d          |                    d
          r t          d|j         d|j         d          t          |d                   dk    rF|d         d                             d          }|d         d         d         d         }|d         }i }	n7d}|d         }||                    d          |d         |d         |d         d}	|                    d          7d}|d         }||                    d          |d         |d         |d         d}	t          |||	i           V  # t           $ r}
t          d|
 d|j                   d}
~
ww xY wdS d| j        v r|                                D ]}	 t          j        |          }|d         d         d         d         d         }|d         d         d         d         }|d         d         d         d         d         r|d         d         d         d                             d          |d         d         d         d                             d          |d         d         d         d                             d           |d         d         d         d                             d!          |d         d         d         d                             d"          |d         d         d         d                             d#          |d         d         d         d                             d$          |d         d         d         d                             d%          |d         d         d         d                             d&          |d         d         d         d                             d'          |d         d         d         d                             d(          d)	d*}	ni }	t          |||	i           V  v# t           $ r}
t          d|
 d+|           d}
~
ww xY wdS d,| j        v rM|                                D ]5}	 t          j        |          }|d         d-         d         d         }d}|d         d-         d         d         r|d         d-         d                             d          |d         d-         d                             d          |d         d-         d                             d           |d         d-         d                             d!          |d         d-         d                             d"          |d         d-         d                             d#          |d         d-         d                             d$          |d         d-         d                             d%          |d         d-         d                             d&          |d         d-         d                             d'          |d         d-         d                             d(          d)	d*}	ni }	t          |||	i           V  # t           $ r}
t          d|
 d+|           d}
~
ww xY wdS t%          d.| j         d/          )0r   r   Nr   r?  r<   r   r   r   r   r   r   r   r   r7   r   r   r[   r   r   r   r   r   r   rL  resultrQ  rJ  stream_tokenis_last_responsestop_reasonpromptprompt_tokens_countcompletion_tokens_counttotal_tokens_count
start_timeend_timemodel_execution_timetime_to_first_tokenthroughput_after_first_tokenbatch_size_used)	ra  rb  rc  rd  re  rf  rg  rh  ri  )r   r`  r   zline: rS  	responsesrA  rW  )r   r   r  r   r   r   r   r   r   r@   r   r   r   r   r   r   r   
iter_linesr   )rw   r   r   r   r   r   r   r7   r   r   r   lines               rD   r   z(ChatSambaStudio._process_stream_response  s     	 	 	 	E  	 t+++M((22F < <;-//&)#/) ) :) ) )  
5zX--%ej#66 #':ej#9#9DD".!1#+#7!1 !1#(:!1 !1 !1# # 
  88G,, ".!1#+#7!1 !1#(:!1 !1 !1# # 
 tI//!33,0OA,>,B,B?,S,SM&*9oa&8&A)&LG!%dB')HH&(G!%dB1>)-'):):.27m6:;O6P+/	?( (H  88G,,8&(G!%dB1>)-'):):.27m6:;O6P+/	?( (H -$+!.6.0	      !   &.a . .!&. .  q< <~ &)=== ++-- 6 65:d++D"8nW5a8A.QGh03D9BH~g.q1':;MN %&-1(^G-DQ-G-P-T-T -. . '+8nW&=a&@&I&M&Mh&W&W7;H~g7Nq7Q$+8""%#&;"<"<;?>';RST;U$+<""%#&?"@"@6:8nW6Ma6P$+7""%#&:";";.28nW.Ea.H.Q.U.U$0/" /" -1N7,CA,Fw,O,S,S$.-" -" 9=Xw8OPQ8R$+9""%#&<"="=7;H~g7Nq7Q$+8""%#&;"<"<@DXw@W$%A"")A++.3/M+N+N37>'3J13M$+4""%#&7"8"87& &"$ "$H $&( '*2*,	       !   &(a ( (!%( (  e6 6r #d&::: ++-- 6 65:d++D"8n[9!<^LGBH~k2156HI %&-1(^K-H-K-O-O -. . '+8n[&A!&D&H&H&R&R7;H~k7R$%8""%#&;"<"<;?>+;V$%<""%#&?"@"@6:8n[6Q$%7""%#&:";";.28n[.I!.L.P.P$0/" /" -1N;,G,J,N,N$.-" -" 9=X{8S$%9""%#&<"="=7;H~k7R$%8""%#&;"<"<@DX$/A""#A%%(S)G%H%H37>+3Nq3Q3U3U$54" 4"3& &"$ "$H $&( '*2*,	       !   &(a ( (!%( (  e6 6r L$"6 L L L  sS   	 #
FH
I"H<<I(I0S
S>$S99S>%H
\11
];]]r   c                     | j         r" | j        |f||d|}|rt          |          S |                     ||d          }|                     |          }t          |          }t          |g          S )a  
    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """
        Call SambaStudio models.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow since
                  it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Returns:
            result: ChatResult with model generation
        """
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            if stream_iter:
                return generate_from_stream(stream_iter)
        response = self._handle_request(messages, stop, streaming=False)
        message = self._process_response(response)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])
    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """
        Stream the output of the SambaStudio model.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow since
                  it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Yields:
            chunk: ChatGenerationChunk with model partial generation
        """
        response = self._handle_request(messages, stop, streaming=True)
        for ai_message_chunk in self._process_stream_response(response):
            chunk = ChatGenerationChunk(message=ai_message_chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk