
import json
from operator import itemgetter
from typing import (
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
    cast,
)

import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
    JsonOutputKeyToolsParser,
    PydanticToolsParser,
    make_invalid_tool_call,
    parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field, SecretStr
from requests import Response


def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
    """
    Convert a BaseMessage to a dictionary with role / content.

    Args:
        message: BaseMessage

    Returns:
        messages_dict: role / content dict
    """
    message_dict: Dict[str, Any] = {}
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
        if "tool_calls" in message.additional_kwargs:
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            # the API expects null content when only tool calls are present
            if message_dict["content"] == "":
                message_dict["content"] = None
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
        }
    else:
        raise TypeError(f"Got unknown type {message}")
    return message_dict


def _create_message_dicts(messages: List[BaseMessage]) -> List[Dict[str, Any]]:
    """
    Convert a list of BaseMessages to a list of dictionaries with role / content.

    Args:
        messages: list of BaseMessages

    Returns:
        messages_dicts: list of role / content dicts
    """
    message_dicts = [_convert_message_to_dict(m) for m in messages]
    return message_dicts


def _is_pydantic_class(obj: Any) -> bool:
    """Return True when ``obj`` is a Pydantic ``BaseModel`` subclass."""
    return isinstance(obj, type) and is_basemodel_subclass(obj)


@deprecated(
    since="0.3.16",
    removal="1.0",
    alternative_import="langchain_sambanova.ChatSambaNovaCloud",
)
class ChatSambaNovaCloud(BaseChatModel):
    """
SambaNova Cloud chat model.

Setup:
    To use, you should have the environment variables:
    `SAMBANOVA_URL` set with your SambaNova Cloud URL.
    `SAMBANOVA_API_KEY` set with your SambaNova Cloud API Key.
    http://cloud.sambanova.ai/
    Example:
    .. code-block:: python
        ChatSambaNovaCloud(
            sambanova_url = SambaNova cloud endpoint URL,
            sambanova_api_key = set with your SambaNova cloud API key,
            model = model name,
            max_tokens = max number of tokens to generate,
            temperature = model temperature,
            top_p = model top p,
            top_k = model top k,
            stream_options = include usage to get generation metrics
        )

Key init args — completion params:
    model: str
        The name of the model to use, e.g., Meta-Llama-3-70B-Instruct.
    streaming: bool
        Whether to use streaming handler when using non streaming methods
    max_tokens: int
        max tokens to generate
    temperature: float
        model temperature
    top_p: float
        model top p
    top_k: int
        model top k
    stream_options: dict
        stream options, include usage to get generation metrics

Key init args — client params:
    sambanova_url: str
        SambaNova Cloud Url
    sambanova_api_key: str
        SambaNova Cloud api key

Instantiate:
    .. code-block:: python

        from langchain_community.chat_models import ChatSambaNovaCloud

        chat = ChatSambaNovaCloud(
            sambanova_url = SambaNova cloud endpoint URL,
            sambanova_api_key = set with your SambaNova cloud API key,
            model = model name,
            max_tokens = max number of tokens to generate,
            temperature = model temperature,
            top_p = model top p,
            top_k = model top k,
            stream_options = include usage to get generation metrics
        )

Invoke:
    .. code-block:: python

        messages = [
            SystemMessage(content="you are an AI assistant."),
            HumanMessage(content="tell me a joke."),
        ]
        response = chat.invoke(messages)

Stream:
    .. code-block:: python

        for chunk in chat.stream(messages):
            print(chunk.content, end="", flush=True)

Async:
    .. code-block:: python

        response = await chat.ainvoke(messages)
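
    The async API also supports token streaming via `astream` (a short
    sketch, assuming an async context; it mirrors the sync `stream` call):

    .. code-block:: python

        async for chunk in chat.astream(messages):
            print(chunk.content, end="", flush=True)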

Tool calling:
    .. code-block:: python

        from pydantic import BaseModel, Field

        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(
                ...,
                description="The city and state, e.g. Los Angeles, CA"
            )

        llm_with_tools = llm.bind_tools([GetWeather])
        ai_msg = llm_with_tools.invoke("Should I bring my umbrella today in LA?")
        ai_msg.tool_calls

    .. code-block:: none

        [
            {
                'name': 'GetWeather',
                'args': {'location': 'Los Angeles, CA'},
                'id': 'call_adf61180ea2b4d228a'
            }
        ]
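
    A tool call can then be answered with a `ToolMessage` and the conversation
    continued (a usage sketch; the weather value shown is made up):

    .. code-block:: python

        from langchain_core.messages import HumanMessage, ToolMessage

        messages = [HumanMessage("Should I bring my umbrella today in LA?")]
        messages.append(ai_msg)
        messages.append(
            ToolMessage("72F and sunny", tool_call_id=ai_msg.tool_calls[0]["id"])
        )
        final_response = llm_with_tools.invoke(messages)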

Structured output:
    .. code-block:: python

        from typing import Optional

        from pydantic import BaseModel, Field

        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        structured_model = llm.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats")

    .. code-block:: python

        Joke(setup="Why did the cat join a band?",
        punchline="Because it wanted to be the purr-cussionist!")

    See `ChatSambaNovaCloud.with_structured_output()` for more.

Token usage:
    .. code-block:: python

        response = chat.invoke(messages)
        print(response.response_metadata["usage"]["prompt_tokens"])
        print(response.response_metadata["usage"]["total_tokens"])

Response metadata:
    .. code-block:: python

        response = chat.invoke(messages)
        print(response.response_metadata)
    """

    sambanova_url: str = Field(default="")
    """SambaNova Cloud Url"""

    sambanova_api_key: SecretStr = Field(default=SecretStr(""))
    """SambaNova Cloud api key"""

    model: str = Field(default="Meta-Llama-3.1-8B-Instruct")
    """The name of the model"""

    streaming: bool = Field(default=False)
    """Whether to use streaming handler when using non streaming methods"""

    max_tokens: int = Field(default=1024)
    """max tokens to generate"""

    temperature: float = Field(default=0.7)
    """model temperature"""

    top_p: Optional[float] = Field(default=None)
    """model top p"""

    top_k: Optional[int] = Field(default=None)
    """model top k"""

    stream_options: Dict[str, Any] = Field(default={"include_usage": True})
    """stream options, include usage to get generation metrics"""

    additional_headers: Dict[str, Any] = Field(default={})
    """Additional headers to send in request"""

    class Config:
        populate_by_name = True

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"sambanova_api_key": "sambanova_api_key"}

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters.

        This information is used by the LangChain callback system, which
        is used for tracing purposes, and makes it possible to monitor LLMs.
        """
        return {
            "model": self.model,
            "streaming": self.streaming,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "stream_options": self.stream_options,
        }

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model."""
        return "sambanovacloud-chatmodel"

    def __init__(self, **kwargs: Any) -> None:
        """Init and validate environment variables."""
        kwargs["sambanova_url"] = get_from_dict_or_env(
            kwargs,
            "sambanova_url",
            "SAMBANOVA_URL",
            default="https://api.sambanova.ai/v1/chat/completions",
        )
        kwargs["sambanova_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(kwargs, "sambanova_api_key", "SAMBANOVA_API_KEY")
        )
        super().__init__(**kwargs)

    def bind_tools(
        self,
        tools: Sequence[
            Union[Dict[str, Any], Type[BaseModel], Callable[..., Any], BaseTool]
        ],
        *,
        tool_choice: Optional[Union[Dict[str, Any], bool, str]] = None,
        parallel_tool_calls: Optional[bool] = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        tool_choice: does not currently support "any"; should be one of
        ["auto", "none", "required"]
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice:
            if isinstance(tool_choice, str):
                # unsupported string choices are coerced to "auto"
                if tool_choice not in ("auto", "none", "required"):
                    tool_choice = "auto"
            elif isinstance(tool_choice, bool):
                if tool_choice:
                    tool_choice = "required"
            elif isinstance(tool_choice, dict):
                raise ValueError(
                    "tool_choice must be one of ['auto', 'none', 'required']"
                )
            else:
                raise ValueError(
                    f"Unrecognized tool_choice type. Expected str, bool. "
                    f"Received: {tool_choice}"
                )
        else:
            tool_choice = "auto"
        kwargs["tool_choice"] = tool_choice
        kwargs["parallel_tool_calls"] = parallel_tool_calls
        return super().bind(tools=formatted_tools, **kwargs)

    def with_structured_output(
        self,
        schema: Optional[Union[Dict[str, Any], Type[BaseModel]]] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict[str, Any], BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:
                    - an OpenAI function/tool schema,
                    - a JSON Schema,
                    - a TypedDict class,
                    - or a Pydantic.BaseModel class.
                If `schema` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method:
                The method for steering model generation, either "function_calling"
                "json_mode" or "json_schema".
                If "function_calling" then the schema will be converted
                to an OpenAI function and the returned model will make use of the
                function-calling API. If "json_mode" or "json_schema" then OpenAI's
                JSON mode will be used.
                Note that if using "json_mode" or "json_schema" then you must include instructions
                for formatting the output into the desired schema in the model call.

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
            an instance of `schema` (i.e., a Pydantic object).

            Otherwise, if `include_raw` is False then Runnable outputs a dict.

            If `include_raw` is True, then Runnable outputs a dict with keys:
                - `"raw"`: BaseMessage
                - `"parsed"`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
                - `"parsing_error"`: Optional[BaseException]

        Example: schema=Pydantic class, method="function_calling", include_raw=False:
            .. code-block:: python

                from typing import Optional

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str = Field(
                        description="A justification for the answer."
                    )


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same.'
                # )

        Example: schema=Pydantic class, method="function_calling", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'arguments': '{"answer": "They weigh the same.", "justification": "A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount."}', 'name': 'AnswerWithJustification'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls', 'usage': {'acceptance_rate': 5, 'completion_tokens': 53, 'completion_tokens_after_first_per_sec': 343.7964936837758, 'completion_tokens_after_first_per_sec_first_ten': 439.1205661878638, 'completion_tokens_per_sec': 162.8511306784833, 'end_time': 1731527851.0698032, 'is_last_response': True, 'prompt_tokens': 213, 'start_time': 1731527850.7137961, 'time_to_first_token': 0.20475482940673828, 'total_latency': 0.32545061111450196, 'total_tokens': 266, 'total_tokens_per_sec': 817.3283162354066}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731527850}, id='95667eaf-447f-4b53-bb6e-b6e1094ded88', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'They weigh the same.', 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'tool_call'}]),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'),
                #     'parsing_error': None
                # }

        Example: schema=TypedDict class, method="function_calling", include_raw=False:
            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing_extensions import Annotated, TypedDict

                from langchain_community.chat_models import ChatSambaNovaCloud


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }

        Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaNovaCloud

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                       'required': ['answer']
                   }
                }

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }

        Example: schema=Pydantic class, method="json_mode", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification,
                    method="json_mode",
                    include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{
  "answer": "They are the same weight",
  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."
}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_mode", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{
  "answer": "They are the same weight",
  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."
}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 4.722222222222222, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 357.1315485254867, 'completion_tokens_after_first_per_sec_first_ten': 416.83279609305305, 'completion_tokens_per_sec': 240.92819585198137, 'end_time': 1731528164.8474727, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528164.4906917, 'time_to_first_token': 0.13837409019470215, 'total_latency': 0.3278985247892492, 'total_tokens': 149, 'total_tokens_per_sec': 454.4088757208256}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528164}, id='15261eaf-8a25-42ef-8ed5-f63d8bf5b1b0'),
                #     'parsed': {
                #         'answer': 'They are the same weight',
                #         'justification': 'A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'},
                #     },
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_schema", include_raw=True:
            .. code-block::

                from langchain_community.chat_models import ChatSambaNovaCloud

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaNovaCloud(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification, method="json_schema", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{
  "answer": "They are the same weight",
  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."
}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }
        """
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "`schema` must be specified when method is "
                    "`function_calling`. Received None."
                )
            tool_name = convert_to_openai_tool(schema)["function"]["name"]
            llm = self.bind_tools([schema], tool_choice=tool_name)
            if is_pydantic_schema:
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[cast(Type[BaseModel], schema)],
                    first_tool_only=True,
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self
            if is_pydantic_schema:
                schema = cast(Type[BaseModel], schema)
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                output_parser = JsonOutputParser()
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "`schema` must be specified when method is not `json_mode`. "
                    "Received None."
                )
            llm = self
            if is_pydantic_schema:
                schema = cast(Type[BaseModel], schema)
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                output_parser = JsonOutputParser()
        else:
            raise ValueError(
                f"Unrecognized method argument. Expected one of "
                f"`function_calling` or `json_mode`. Received: `{method}`"
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser

    def _handle_request(
        self,
        messages_dicts: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        **kwargs: Any,
    ) -> Response:
        """
Performs a post request to the LLM API.

Args:
    messages_dicts: List of role / content dicts to use as input.
    stop: list of stop tokens
    streaming: whether to do a streaming call

Returns:
    An iterator of response dicts.
    """
        if streaming:
            data = {
                "messages": messages_dicts,
                "max_tokens": self.max_tokens,
                "stop": stop,
                "model": self.model,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "stream": True,
                "stream_options": self.stream_options,
                **kwargs,
            }
        else:
            data = {
                "messages": messages_dicts,
                "max_tokens": self.max_tokens,
                "stop": stop,
                "model": self.model,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                **kwargs,
            }
        http_session = requests.Session()
        response = http_session.post(
            self.sambanova_url,
            headers={
                "Authorization": f"Bearer "
                f"{self.sambanova_api_key.get_secret_value()}",
                "Content-Type": "application/json",
                **self.additional_headers,
            },
            json=data,
            stream=streaming,
        )
        if response.status_code != 200:
            raise RuntimeError(
                f"Sambanova /complete call failed with status code "
                f"{response.status_code}."
                f"{response.text}."
            )
        return response

    def _process_response(self, response: Response) -> AIMessage:
        """
Process a non streaming response from the api

Args:
    response: A request Response object

Returns:
    generation: an AIMessage with model generation
    """
        try:
            response_dict = response.json()
            if response_dict.get("error"):
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response.status_code}."
                    f" {response_dict}."
                )
        except Exception as e:
            raise RuntimeError(
                f"Sambanova /complete call failed couldn't get JSON response {e}"
                f" response: {response.text}"
            )
        content = response_dict["choices"][0]["message"].get("content", "")
        if content is None:
            content = ""
        additional_kwargs: Dict[str, Any] = {}
        tool_calls = []
        invalid_tool_calls = []
        raw_tool_calls = response_dict["choices"][0]["message"].get("tool_calls")
        if raw_tool_calls:
            additional_kwargs["tool_calls"] = raw_tool_calls
            for raw_tool_call in raw_tool_calls:
                # arguments may arrive as a dict; the parser expects a JSON string
                if isinstance(raw_tool_call["function"]["arguments"], dict):
                    raw_tool_call["function"]["arguments"] = json.dumps(
                        raw_tool_call["function"].get("arguments", {})
                    )
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )
        message = AIMessage(
            content=content,
            additional_kwargs=additional_kwargs,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
            response_metadata={
                "finish_reason": response_dict["choices"][0]["finish_reason"],
                "usage": response_dict.get("usage"),
                "model_name": response_dict["model"],
                "system_fingerprint": response_dict["system_fingerprint"],
                "created": response_dict["created"],
            },
            id=response_dict["id"],
        )
        return message

    def _process_stream_response(
        self, response: Response
    ) -> Iterator[BaseMessageChunk]:
        """
Process a streaming response from the api

Args:
    response: An iterable request Response object

Yields:
    generation: an AIMessageChunk with model partial generation
    """
        try:
            import sseclient
        except ImportError:
            raise ImportError(
                "could not import sseclient library"
                "Please install it with `pip install sseclient-py`."
            )
        client = sseclient.SSEClient(response)
        # initialized up front: the final usage-only chunk has no "choices"
        # entry and reuses the last finish_reason seen in the stream
        finish_reason = None
        for event in client.events():
            if event.event == "error_event":
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response.status_code}."
                    f"{event.data}."
                )
            try:
                # skip the final "[DONE]" sentinel event
                if event.data != "[DONE]":
                    if isinstance(event.data, str):
                        data = json.loads(event.data)
                    else:
                        raise RuntimeError(
                            f"Sambanova /complete call failed with status code "
                            f"{response.status_code}."
                            f"{event.data}."
                        )
                    if data.get("error"):
                        raise RuntimeError(
                            f"Sambanova /complete call failed with status code "
                            f"{response.status_code}."
                            f"{event.data}."
                        )
                    if len(data["choices"]) > 0:
                        finish_reason = data["choices"][0].get("finish_reason")
                        content = data["choices"][0]["delta"]["content"]
                        id = data["id"]
                        chunk = AIMessageChunk(
                            content=content, id=id, additional_kwargs={}
                        )
                    else:
                        content = ""
                        id = data["id"]
                        metadata = {
                            "finish_reason": finish_reason,
                            "usage": data.get("usage"),
                            "model_name": data["model"],
                            "system_fingerprint": data["system_fingerprint"],
                            "created": data["created"],
                        }
                        chunk = AIMessageChunk(
                            content=content,
                            id=id,
                            response_metadata=metadata,
                            additional_kwargs={},
                        )
                    yield chunk
            except Exception as e:
                raise RuntimeError(
                    f"Error getting content chunk raw streamed response: {e}"
                    f" data: {event.data}"
                )

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """
Call SambaNovaCloud models.

Args:
    messages: the prompt composed of a list of messages.
    stop: a list of strings on which the model should stop generating.
          If generation stops due to a stop token, the stop token itself
          SHOULD BE INCLUDED as part of the output. This is not enforced
          across models right now, but it's a good practice to follow since
          it makes it much easier to parse the output of the model
          downstream and understand why generation stopped.
    run_manager: A run manager with callbacks for the LLM.

Returns:
    result: ChatResult with model generation
    """
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            if stream_iter:
                return generate_from_stream(stream_iter)
        messages_dicts = _create_message_dicts(messages)
        response = self._handle_request(messages_dicts, stop, streaming=False, **kwargs)
        message = self._process_response(response)
        generation = ChatGeneration(
            message=message,
            generation_info={
                "finish_reason": message.response_metadata["finish_reason"]
            },
        )
        return ChatResult(generations=[generation])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """
Stream the output of the SambaNovaCloud chat model.

Args:
    messages: the prompt composed of a list of messages.
    stop: a list of strings on which the model should stop generating.
          If generation stops due to a stop token, the stop token itself
          SHOULD BE INCLUDED as part of the output. This is not enforced
          across models right now, but it's a good practice to follow since
          it makes it much easier to parse the output of the model
          downstream and understand why generation stopped.
    run_manager: A run manager with callbacks for the LLM.

Yields:
    chunk: ChatGenerationChunk with model partial generation
    """
        messages_dicts = _create_message_dicts(messages)
        response = self._handle_request(messages_dicts, stop, streaming=True, **kwargs)
        for ai_message_chunk in self._process_stream_response(response):
            chunk = ChatGenerationChunk(message=ai_message_chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk


@deprecated(
    since="0.3.16",
    removal="1.0",
    alternative_import="langchain_sambanova.ChatSambaStudio",
)
class ChatSambaStudio(BaseChatModel):
    """
SambaStudio chat model.

Setup:
    To use, you should have the environment variables:
    `SAMBASTUDIO_URL` set with your SambaStudio deployed endpoint URL.
    `SAMBASTUDIO_API_KEY` set with your SambaStudio deployed endpoint Key.
    https://docs.sambanova.ai/sambastudio/latest/index.html
    Example:

    .. code-block:: python

        ChatSambaStudio(
            sambastudio_url = set with your SambaStudio deployed endpoint URL,
            sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
            model = model or expert name (set for Bundle endpoints),
            max_tokens = max number of tokens to generate,
            temperature = model temperature,
            top_p = model top p,
            top_k = model top k,
            do_sample = whether to do sample
            process_prompt = whether to process prompt
                (set for Bundle generic v1 and v2 endpoints)
            stream_options = include usage to get generation metrics
            special_tokens = start, start_role, end_role, end special tokens
                (set for Bundle generic v1 and v2 endpoints when process prompt
                 set to false or for StandAlone v1 and v2 endpoints)
            model_kwargs: Optional = Extra Key word arguments to pass to the model.
        )

Key init args — completion params:
    model: str
        The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
        (set for Bundle endpoints).
    streaming: bool
        Whether to use streaming handler when using non streaming methods
    max_tokens: int
        max tokens to generate
    temperature: float
        model temperature
    top_p: float
        model top p
    top_k: int
        model top k
    do_sample: bool
        whether to do sample
    process_prompt:
        whether to process prompt (set for Bundle generic v1 and v2 endpoints)
    stream_options: dict
        stream options, include usage to get generation metrics
    special_tokens: dict
        start, start_role, end_role and end special tokens
        (set for Bundle generic v1 and v2 endpoints when process prompt set to false
         or for StandAlone v1 and v2 endpoints) default to llama3 special tokens
    model_kwargs: dict
        Extra Key word arguments to pass to the model.

Key init args — client params:
    sambastudio_url: str
        SambaStudio endpoint Url
    sambastudio_api_key: str
        SambaStudio endpoint api key

Instantiate:
    .. code-block:: python

        from langchain_community.chat_models import ChatSambaStudio

        chat = ChatSambaStudio(
            sambastudio_url = set with your SambaStudio deployed endpoint URL,
            sambastudio_api_key = set with your SambaStudio deployed endpoint Key.
            model = model or expert name (set for Bundle endpoints),
            max_tokens = max number of tokens to generate,
            temperature = model temperature,
            top_p = model top p,
            top_k = model top k,
            do_sample = whether to do sample
            process_prompt = whether to process prompt
                (set for Bundle generic v1 and v2 endpoints)
            stream_options = include usage to get generation metrics
            special_tokens = start, start_role, end_role, end special tokens
                (set for Bundle generic v1 and v2 endpoints when process prompt
                 set to false or for StandAlone v1 and v2 endpoints)
            model_kwargs: Optional = Extra Key word arguments to pass to the model.
        )

Invoke:
    .. code-block:: python

        messages = [
            SystemMessage(content="you are an AI assistant."),
            HumanMessage(content="tell me a joke."),
        ]
        response = chat.invoke(messages)

Stream:
    .. code-block:: python

        for chunk in chat.stream(messages):
            print(chunk.content, end="", flush=True)

Async:
    .. code-block:: python

        response = await chat.ainvoke(messages)

Tool calling:
    .. code-block:: python

        from pydantic import BaseModel, Field

        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(
                ...,
                description="The city and state, e.g. Los Angeles, CA"
            )

        llm_with_tools = llm.bind_tools([GetWeather])
        ai_msg = llm_with_tools.invoke("Should I bring my umbrella today in LA?")
        ai_msg.tool_calls

    .. code-block:: python

        [
            {
                'name': 'GetWeather',
                'args': {'location': 'Los Angeles, CA'},
                'id': 'call_adf61180ea2b4d228a'
            }
        ]

Structured output:
    .. code-block:: python

        from typing import Optional

        from pydantic import BaseModel, Field

        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")

        structured_model = llm.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats")

    .. code-block:: python

        Joke(setup="Why did the cat join a band?",
        punchline="Because it wanted to be the purr-cussionist!")

    See `ChatSambaStudio.with_structured_output()` for more.

Token usage:
    .. code-block:: python

        response = chat.invoke(messages)
        print(response.response_metadata["usage"]["prompt_tokens"])
        print(response.response_metadata["usage"]["total_tokens"])

Response metadata:
    .. code-block:: python

        response = chat.invoke(messages)
        print(response.response_metadata)
    """

    sambastudio_url: str = Field(default="")
    """SambaStudio Url"""

    sambastudio_api_key: SecretStr = Field(default=SecretStr(""))
    """SambaStudio api key"""

    base_url: str = Field(default="", exclude=True)
    """SambaStudio non streaming Url"""

    streaming_url: str = Field(default="", exclude=True)
    """SambaStudio streaming Url"""

    model: Optional[str] = Field(default=None)
    """the name of the model or expert to use (set for Bundle endpoints)"""

    streaming: bool = Field(default=False)
    """Whether to use streaming handler when using non streaming methods"""

    max_tokens: int = Field(default=1024)
    """max tokens to generate"""

    temperature: Optional[float] = Field(default=0.7)
    """model temperature"""

    top_p: Optional[float] = Field(default=None)
    """model top p"""

    top_k: Optional[int] = Field(default=None)
    """model top k"""

    do_sample: Optional[bool] = Field(default=None)
    """whether to do sample"""

    process_prompt: Optional[bool] = Field(default=True)
    """whether to process prompt (set for Bundle generic v1 and v2 endpoints)"""

    stream_options: Dict[str, Any] = Field(default={"include_usage": True})
    """stream options, include usage to get generation metrics"""

    special_tokens: Dict[str, Any] = Field(
        default={
            "start": "<|begin_of_text|>",
            "start_role": "<|begin_of_text|><|start_header_id|>{role}<|end_header_id|>",
            "end_role": "<|eot_id|>",
            "end": "<|start_header_id|>assistant<|end_header_id|>\n",
        }
    )
    """start, start_role, end_role and end special tokens
    (set for Bundle generic v1 and v2 endpoints when process prompt set to false
     or for StandAlone v1 and v2 endpoints); default to llama3 special tokens"""

    model_kwargs: Optional[Dict[str, Any]] = None
    """Key word arguments to pass to the model."""

    additional_headers: Dict[str, Any] = Field(default={})
    """Additional headers to send in request"""

    class Config:
        populate_by_name = True

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return False

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {
            "sambastudio_url": "sambastudio_url",
            "sambastudio_api_key": "sambastudio_api_key",
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters.

        This information is used by the LangChain callback system, which
        is used for tracing purposes, and makes it possible to monitor LLMs.
        """
        return {
            "model": self.model,
            "streaming": self.streaming,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "do_sample": self.do_sample,
            "process_prompt": self.process_prompt,
            "stream_options": self.stream_options,
            "special_tokens": self.special_tokens,
            "model_kwargs": self.model_kwargs,
        }

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model."""
        return "sambastudio-chatmodel"

    def __init__(self, **kwargs: Any) -> None:
        """Init and validate environment variables."""
        kwargs["sambastudio_url"] = get_from_dict_or_env(
            kwargs, "sambastudio_url", "SAMBASTUDIO_URL"
        )
        kwargs["sambastudio_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(kwargs, "sambastudio_api_key", "SAMBASTUDIO_API_KEY")
        )
        # derive the generic and streaming endpoint URLs from the deployed
        # endpoint URL (the helper is defined further down in this class)
        kwargs["base_url"], kwargs["streaming_url"] = self._get_sambastudio_urls(
            kwargs["sambastudio_url"]
        )
        super().__init__(**kwargs)

    def bind_tools(
        self,
        tools: Sequence[
            Union[Dict[str, Any], Type[BaseModel], Callable[..., Any], BaseTool]
        ],
        *,
        tool_choice: Optional[Union[Dict[str, Any], bool, str]] = None,
        parallel_tool_calls: Optional[bool] = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        tool_choice: does not currently support "any"; should be one of
        ["auto", "none", "required"]
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice:
            if isinstance(tool_choice, str):
                # unsupported string choices are coerced to "auto"
                if tool_choice not in ("auto", "none", "required"):
                    tool_choice = "auto"
            elif isinstance(tool_choice, bool):
                if tool_choice:
                    tool_choice = "required"
            elif isinstance(tool_choice, dict):
                raise ValueError(
                    "tool_choice must be one of ['auto', 'none', 'required']"
                )
            else:
                raise ValueError(
                    f"Unrecognized tool_choice type. Expected str, bool. "
                    f"Received: {tool_choice}"
                )
        else:
            tool_choice = "auto"
        kwargs["tool_choice"] = tool_choice
        kwargs["parallel_tool_calls"] = parallel_tool_calls
        return super().bind(tools=formatted_tools, **kwargs)

    def with_structured_output(
        self,
        schema: Optional[Union[Dict[str, Any], Type[BaseModel]]] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict[str, Any], BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:
                    - an OpenAI function/tool schema,
                    - a JSON Schema,
                    - a TypedDict class,
                    - or a Pydantic class.
                If `schema` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            method:
                The method for steering model generation, either "function_calling"
                "json_mode" or "json_schema".
                If "function_calling" then the schema will be converted
                to an OpenAI function and the returned model will make use of the
                function-calling API. If "json_mode" or "json_schema" then OpenAI's
                JSON mode will be used.
                Note that if using "json_mode" or "json_schema" then you must include instructions
                for formatting the output into the desired schema in the model call.

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
            an instance of `schema` (i.e., a Pydantic object).

            Otherwise, if `include_raw` is False then Runnable outputs a dict.

            If `include_raw` is True, then Runnable outputs a dict with keys:
                - `"raw"`: BaseMessage
                - `"parsed"`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
                - `"parsing_error"`: Optional[BaseException]

        Example: schema=Pydantic class, method="function_calling", include_raw=False:
            .. code-block:: python

                from typing import Optional

                from langchain_community.chat_models import ChatSambaStudio
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str = Field(
                        description="A justification for the answer."
                    )


                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same.'
                # )

        Example: schema=Pydantic class, method="function_calling", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaStudio
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'arguments': '{"answer": "They weigh the same.", "justification": "A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount."}', 'name': 'AnswerWithJustification'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls', 'usage': {'acceptance_rate': 5, 'completion_tokens': 53, 'completion_tokens_after_first_per_sec': 343.7964936837758, 'completion_tokens_after_first_per_sec_first_ten': 439.1205661878638, 'completion_tokens_per_sec': 162.8511306784833, 'end_time': 1731527851.0698032, 'is_last_response': True, 'prompt_tokens': 213, 'start_time': 1731527850.7137961, 'time_to_first_token': 0.20475482940673828, 'total_latency': 0.32545061111450196, 'total_tokens': 266, 'total_tokens_per_sec': 817.3283162354066}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731527850}, id='95667eaf-447f-4b53-bb6e-b6e1094ded88', tool_calls=[{'name': 'AnswerWithJustification', 'args': {'answer': 'They weigh the same.', 'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'}, 'id': 'call_17a431fc6a4240e1bd', 'type': 'tool_call'}]),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'),
                #     'parsing_error': None
                # }

        Example: schema=TypedDict class, method="function_calling", include_raw=False:
            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing_extensions import Annotated, TypedDict

                from langchain_community.chat_models import ChatSambaStudio


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }

        Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaStudio

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                        'required': ['answer']
                    }
                }

                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'A pound is a unit of weight or mass, so one pound of bricks and one pound of feathers both weigh the same amount.'
                # }
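
            A dict like ``oai_schema`` is normalized internally with
            ``convert_to_openai_tool`` and bound as a tool, so an equivalent
            explicit binding looks roughly like the sketch below (illustrative,
            not a separate API):

            .. code-block:: python

                from langchain_core.utils.function_calling import (
                    convert_to_openai_tool,
                )

                tool = convert_to_openai_tool(oai_schema)
                llm_with_tool = llm.bind_tools(
                    [oai_schema], tool_choice=tool["function"]["name"]
                )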

        Example: schema=Pydantic class, method="json_mode", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaStudio
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification,
                    method="json_mode",
                    include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }
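
            Note that with ``method="json_mode"`` the prompt itself has to ask
            the model for a JSON blob with the expected keys, as in the
            invocation above. A minimal sketch of factoring that instruction
            into a reusable prompt (the ``ChatPromptTemplate`` wiring is
            illustrative, not part of the original example):

            .. code-block:: python

                from langchain_core.prompts import ChatPromptTemplate

                prompt = ChatPromptTemplate.from_messages(
                    [
                        (
                            "system",
                            "Answer the user question. Return only a JSON blob "
                            "with keys 'answer' and 'justification'.",
                        ),
                        ("human", "{question}"),
                    ]
                )
                chain = prompt | structured_llm
                chain.invoke(
                    {
                        "question": "What's heavier a pound of bricks "
                        "or a pound of feathers?"
                    }
                )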

        Example: schema=None, method="json_mode", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaStudio

                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 4.722222222222222, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 357.1315485254867, 'completion_tokens_after_first_per_sec_first_ten': 416.83279609305305, 'completion_tokens_per_sec': 240.92819585198137, 'end_time': 1731528164.8474727, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528164.4906917, 'time_to_first_token': 0.13837409019470215, 'total_latency': 0.3278985247892492, 'total_tokens': 149, 'total_tokens_per_sec': 454.4088757208256}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528164}, id='15261eaf-8a25-42ef-8ed5-f63d8bf5b1b0'),
                #     'parsed': {
                #         'answer': 'They are the same weight',
                #         'justification': 'A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'
                #     },
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_schema", include_raw=True:
            .. code-block:: python

                from langchain_community.chat_models import ChatSambaStudio
                from pydantic import BaseModel

                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str

                llm = ChatSambaStudio(model="Meta-Llama-3.1-70B-Instruct", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification, method="json_schema", include_raw=True)

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.

"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\n  "answer": "They are the same weight",\n  "justification": "A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities."\n}', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'usage': {'acceptance_rate': 5.3125, 'completion_tokens': 79, 'completion_tokens_after_first_per_sec': 292.65701089829776, 'completion_tokens_after_first_per_sec_first_ten': 346.43324678555325, 'completion_tokens_per_sec': 200.012158915008, 'end_time': 1731528071.1708555, 'is_last_response': True, 'prompt_tokens': 70, 'start_time': 1731528070.737394, 'time_to_first_token': 0.16693782806396484, 'total_latency': 0.3949759876026827, 'total_tokens': 149, 'total_tokens_per_sec': 377.2381225105847}, 'model_name': 'Meta-Llama-3.1-70B-Instruct', 'system_fingerprint': 'fastcoe', 'created': 1731528070}, id='83208297-3eb9-4021-a856-ca78a15758df'),
                #     'parsed': AnswerWithJustification(answer='They are the same weight', justification='A pound is a unit of weight or mass, so a pound of bricks and a pound of feathers both weigh the same amount, one pound. The difference is in their density and volume. A pound of feathers would take up more space than a pound of bricks due to the difference in their densities.'),
                #     'parsing_error': None
                # }

        """  # noqa: E501
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is 'function_calling'. "
                    "Received None."
                )
            formatted_tool = convert_to_openai_tool(schema)
            tool_name = formatted_tool["function"]["name"]
            llm = self.bind_tools([schema], tool_choice=tool_name)
            if _is_pydantic_class(schema):
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[cast(Type[BaseModel], schema)], first_tool_only=True
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self
            if _is_pydantic_class(schema):
                output_parser = PydanticOutputParser(
                    pydantic_object=cast(Type[BaseModel], schema)
                )
            else:
                output_parser = JsonOutputParser()
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is not 'json_mode'. "
                    "Received None."
                )
            llm = self
            if _is_pydantic_class(schema):
                output_parser = PydanticOutputParser(
                    pydantic_object=cast(Type[BaseModel], schema)
                )
            else:
                output_parser = JsonOutputParser()
        else:
            raise ValueError(
                "Unrecognized method argument. Expected one of 'function_calling' "
                f"or 'json_mode'. Received: '{method}'"
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser

    def _get_role(self, message: BaseMessage) -> str:
        """
        Get the role of LangChain BaseMessage

        Args:
            message: LangChain BaseMessage

        Returns:
            str: Role of the LangChain BaseMessage
        """
        if isinstance(message, SystemMessage):
            role = "system"
        elif isinstance(message, HumanMessage):
            role = "user"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, ToolMessage):
            role = "tool"
        elif isinstance(message, ChatMessage):
            role = message.role
        else:
            raise TypeError(f"Got unknown type {message}")
        return role

    def _messages_to_string(self, messages: List[BaseMessage], **kwargs: Any) -> str:
        """
        Convert a list of BaseMessages to a:
        - dumped json string with Role / content dict structure
            when process_prompt is true,
        - string with special tokens if process_prompt is false
        for generic V1 and V2 endpoints

        Args:
            messages: list of BaseMessages

        Returns:
            str: string to send as model input depending on process_prompt param
        """
        if self.process_prompt:
            messages_dict: Dict[str, Any] = {
                "conversation_id": "sambaverse-conversation-id",
                "messages": [],
                **kwargs,
            }
            for message in messages:
                if isinstance(message, AIMessage):
                    message_dict = {
                        "message_id": message.id,
                        "role": self._get_role(message),
                        "content": message.content,
                    }
                    if "tool_calls" in message.additional_kwargs:
                        message_dict["tool_calls"] = message.additional_kwargs[
                            "tool_calls"
                        ]
                        if message_dict["content"] == "":
                            message_dict["content"] = None
                elif isinstance(message, ToolMessage):
                    message_dict = {
                        "message_id": message.id,
                        "role": self._get_role(message),
                        "content": message.content,
                        "tool_call_id": message.tool_call_id,
                    }
                else:
                    message_dict = {
                        "message_id": message.id,
                        "role": self._get_role(message),
                        "content": message.content,
                    }
                messages_dict["messages"].append(message_dict)
            messages_string = json.dumps(messages_dict)
        else:
            if "tools" in kwargs.keys():
                raise NotImplementedError(
                    "tool calling not supported in API Generic V2 without "
                    "process_prompt, switch to OpenAI compatible API or "
                    "Generic V2 API with process_prompt=True"
                )
            messages_string = self.special_tokens["start"]
            for message in messages:
                messages_string += self.special_tokens["start_role"].format(
                    role=self._get_role(message)
                )
                messages_string += f" {message.content} "
                messages_string += self.special_tokens["end_role"]
            messages_string += self.special_tokens["end"]
        return messages_string

    def _get_sambastudio_urls(self, url: str) -> Tuple[str, str]:
        """
        Get streaming and non streaming URLs from the given URL

        Args:
            url: string with sambastudio base or streaming endpoint url

        Returns:
            base_url: string with url to do non streaming calls
            streaming_url: string with url to do streaming calls
        """
        if "chat/completions" in url:
            base_url = url
            stream_url = url
        else:
            if "stream" in url:
                base_url = url.replace("stream/", "")
                stream_url = url
            else:
                base_url = url
                if "generic" in url:
                    stream_url = "generic/stream".join(url.split("generic"))
                else:
                    raise ValueError("Unsupported URL")
        return base_url, stream_url

    def _handle_request(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        streaming: bool = False,
        **kwargs: Any,
    ) -> Response:
        """
        Performs a post request to the LLM API.

        Args:
            messages: list of BaseMessages to use as input
            stop: list of stop tokens
            streaming: whether to do a streaming call

        Returns:
            A request Response object
        """
        # OpenAI compatible API
        if "chat/completions" in self.sambastudio_url:
            messages_dicts = _create_message_dicts(messages)
            data = {
                "messages": messages_dicts,
                "max_tokens": self.max_tokens,
                "stop": stop,
                "model": self.model,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "stream": streaming,
                "stream_options": self.stream_options,
                **kwargs,
            }
            data = {key: value for key, value in data.items() if value is not None}
            headers = {
                "Authorization": (
                    f"Bearer {self.sambastudio_api_key.get_secret_value()}"
                ),
                "Content-Type": "application/json",
                **self.additional_headers,
            }

        # API Generic V2
        elif "api/v2/predict/generic" in self.sambastudio_url:
            items = [
                {"id": "item0", "value": self._messages_to_string(messages, **kwargs)}
            ]
            params: Dict[str, Any] = {
                "select_expert": self.model,
                "process_prompt": self.process_prompt,
                "max_tokens_to_generate": self.max_tokens,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "do_sample": self.do_sample,
            }
            if self.model_kwargs is not None:
                params = {**params, **self.model_kwargs}
            params = {key: value for key, value in params.items() if value is not None}
            data = {"items": items, "params": params}
            headers = {
                "key": self.sambastudio_api_key.get_secret_value(),
                **self.additional_headers,
            }

        # API Generic V1
        elif "api/predict/generic" in self.sambastudio_url:
            if "tools" in kwargs.keys():
                raise NotImplementedError(
                    "tool calling not supported in API Generic V1, "
                    "switch to OpenAI compatible API or Generic V2 API"
                )
            params = {
                "select_expert": self.model,
                "process_prompt": self.process_prompt,
                "max_tokens_to_generate": self.max_tokens,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "top_k": self.top_k,
                "do_sample": self.do_sample,
            }
            if self.model_kwargs is not None:
                params = {**params, **self.model_kwargs}
            params = {
                key: {"type": type(value).__name__, "value": str(value)}
                for key, value in params.items()
                if value is not None
            }
            if streaming:
                data = {
                    "instance": self._messages_to_string(messages),
                    "params": params,
                }
            else:
                data = {
                    "instances": [self._messages_to_string(messages)],
                    "params": params,
                }
            headers = {
                "key": self.sambastudio_api_key.get_secret_value(),
                **self.additional_headers,
            }

        else:
            raise ValueError(
                f"Unsupported URL {self.sambastudio_url}, "
                "only openai, generic v1 and generic v2 APIs are supported"
            )

        http_session = requests.Session()
        if streaming:
            response = http_session.post(
                self.streaming_url, headers=headers, json=data, stream=True
            )
        else:
            response = http_session.post(
                self.base_url, headers=headers, json=data, stream=False
            )
        if response.status_code != 200:
            raise RuntimeError(
                f"Sambanova /complete call failed with status code "
                f"{response.status_code}.\n{response.text}."
            )
        return response

    def _process_response(self, response: Response) -> AIMessage:
        """
        Process a non streaming response from the api

        Args:
            response: A request Response object

        Returns:
            message: an AIMessage with model generation
        """
        # Extract json payload from response
        try:
            response_dict = response.json()
        except Exception as e:
            raise RuntimeError(
                f"Sambanova /complete call failed couldn't get JSON response {e}"
                f" response: {response.text}"
            )

        additional_kwargs: Dict[str, Any] = {}
        tool_calls = []
        invalid_tool_calls = []
        raw_tool_calls = None
        id = None

        # OpenAI compatible API
        if "chat/completions" in self.sambastudio_url:
            content = response_dict["choices"][0]["message"].get("content", "")
            if content is None:
                content = ""
            id = response_dict["id"]
            response_metadata = {
                "finish_reason": response_dict["choices"][0].get("finish_reason"),
                "usage": response_dict.get("usage"),
                "model_name": response_dict.get("model"),
                "system_fingerprint": response_dict.get("system_fingerprint"),
                "created": response_dict.get("created"),
            }
            raw_tool_calls = response_dict["choices"][0]["message"].get("tool_calls")

        # API Generic V2
        elif "api/v2/predict/generic" in self.sambastudio_url:
            item = response_dict["items"][0]
            content = item["value"]["completion"]
            id = item["id"]
            response_metadata = item
            raw_tool_calls = item["value"].get("tool_calls")

        # API Generic V1
        elif "api/predict/generic" in self.sambastudio_url:
            content = response_dict["predictions"][0]["completion"]
            response_metadata = response_dict

        else:
            raise ValueError(
                f"Unsupported URL {self.sambastudio_url}, "
                "only openai, generic v1 and generic v2 APIs are supported"
            )

        if raw_tool_calls:
            additional_kwargs["tool_calls"] = raw_tool_calls
            for raw_tool_call in raw_tool_calls:
                # arguments may come back as a dict; parse_tool_call expects
                # a JSON string
                if isinstance(raw_tool_call["function"]["arguments"], dict):
                    raw_tool_call["function"]["arguments"] = json.dumps(
                        raw_tool_call["function"].get("arguments", {})
                    )
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )

        return AIMessage(
            content=content,
            additional_kwargs=additional_kwargs,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
            response_metadata=response_metadata,
            id=id,
        )

    def _process_stream_response(
        self, response: Response
    ) -> Iterator[AIMessageChunk]:
        """
        Process a streaming response from the api

        Args:
            response: An iterable request Response object

        Yields:
            chunk: an AIMessageChunk with model partial generation
        """
        try:
            import sseclient
        except ImportError:
            raise ImportError(
                "could not import sseclient library. "
                "Please install it with `pip install sseclient-py`."
            )

        # usage keys reported by the generic V1 and V2 endpoints on the
        # last streamed chunk
        usage_keys = (
            "prompt_tokens_count",
            "completion_tokens_count",
            "total_tokens_count",
            "start_time",
            "end_time",
            "model_execution_time",
            "time_to_first_token",
            "throughput_after_first_token",
            "batch_size_used",
        )

        # OpenAI compatible API
        if "chat/completions" in self.sambastudio_url:
            finish_reason = None
            client = sseclient.SSEClient(response)
            for event in client.events():
                if event.event == "error_event":
                    raise RuntimeError(
                        f"Sambanova /complete call failed with status code "
                        f"{response.status_code}.\n{event.data}."
                    )
                if event.data == "[DONE]":
                    continue
                try:
                    data = json.loads(event.data)
                except Exception as e:
                    raise RuntimeError(
                        f"Error getting content chunk raw streamed response: {e}"
                        f" data: {event.data}"
                    )
                if data.get("error"):
                    raise RuntimeError(
                        f"Sambanova /complete call failed with status code "
                        f"{response.status_code}.\n{event.data}."
                    )
                metadata: Dict[str, Any] = {}
                if len(data["choices"]) > 0:
                    finish_reason = data["choices"][0].get("finish_reason")
                    content = data["choices"][0]["delta"].get("content", "")
                else:
                    # final chunk carries no choices, only usage metadata
                    content = ""
                    metadata = {
                        "finish_reason": finish_reason,
                        "usage": data.get("usage"),
                        "model_name": data.get("model"),
                        "system_fingerprint": data.get("system_fingerprint"),
                        "created": data.get("created"),
                    }
                yield AIMessageChunk(
                    content=content,
                    id=data["id"],
                    response_metadata=metadata,
                    additional_kwargs={},
                )

        # API Generic V2
        elif "api/v2/predict/generic" in self.sambastudio_url:
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except Exception as e:
                    raise RuntimeError(
                        f"Error getting content chunk raw streamed response: {e}"
                        f" line: {line!r}"
                    )
                item = data["result"]["items"][0]
                metadata = {}
                if item["value"].get("is_last_response"):
                    metadata = {
                        "finish_reason": item["value"].get("stop_reason"),
                        "prompt": item["value"].get("prompt"),
                        "usage": {k: item["value"].get(k) for k in usage_keys},
                    }
                yield AIMessageChunk(
                    content=item["value"]["stream_token"],
                    id=item["id"],
                    response_metadata=metadata,
                    additional_kwargs={},
                )

        # API Generic V1
        elif "api/predict/generic" in self.sambastudio_url:
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except Exception as e:
                    raise RuntimeError(
                        f"Error getting content chunk raw streamed response: {e}"
                        f" line: {line!r}"
                    )
                item = data["result"]["responses"][0]
                metadata = {}
                if item.get("is_last_response"):
                    metadata = {
                        "finish_reason": item.get("stop_reason"),
                        "prompt": item.get("prompt"),
                        "usage": {k: item.get(k) for k in usage_keys},
                    }
                yield AIMessageChunk(
                    content=item["stream_token"],
                    id=None,
                    response_metadata=metadata,
                    additional_kwargs={},
                )

        else:
            raise ValueError(
                f"Unsupported URL {self.sambastudio_url}, "
                "only openai, generic v1 and generic v2 APIs are supported"
            )

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """
        Call SambaStudio models.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow
                  since it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Returns:
            result: ChatResult with model generation
        """
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            if stream_iter:
                return generate_from_stream(stream_iter)
        response = self._handle_request(messages, stop, streaming=False, **kwargs)
        message = self._process_response(response)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """
        Stream the output of the SambaStudio model.

        Args:
            messages: the prompt composed of a list of messages.
            stop: a list of strings on which the model should stop generating.
                  If generation stops due to a stop token, the stop token itself
                  SHOULD BE INCLUDED as part of the output. This is not enforced
                  across models right now, but it's a good practice to follow
                  since it makes it much easier to parse the output of the model
                  downstream and understand why generation stopped.
            run_manager: A run manager with callbacks for the LLM.

        Yields:
            chunk: ChatGenerationChunk with model partial generation
        """
        response = self._handle_request(messages, stop, streaming=True, **kwargs)
        for ai_message_chunk in self._process_stream_response(response):
            chunk = ChatGenerationChunk(message=ai_message_chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk