
    $hY                       S r SSKJr  SSKrSSKrSSKJrJrJr  SSK	J
r
JrJrJrJrJr  SSKrSSKJr  SSKJr  SSKJr  SS	KJrJr  SS
KJr  SSKJrJr  SSKJ r   SSK!J"r"J#r#J$r$J%r%  SSK&J'r'J(r(  SSK)J*r*  \RV                  " \,5      r-\" S\"S9r.\\/\0\
4   \1\.   4   r2\\/\.4   r3 " S S\5      r4SS jr5 " S S\*5      r6g)zAzure OpenAI chat wrapper.    )annotationsN)AsyncIterator	AwaitableIterator)AnyCallableOptional	TypedDictTypeVarUnion)LanguageModelInput)LangSmithParams)BaseMessage)ChatGenerationChunk
ChatResult)Runnable)from_envsecret_from_env)is_basemodel_subclass)	BaseModelField	SecretStrmodel_validator)LiteralSelf)BaseChatOpenAI_BM)boundc                  4    \ rS rSr% S\S'   S\S'   S\S'   Srg	)
_AllReturnType   r   rawzOptional[_DictOrPydantic]parsedzOptional[BaseException]parsing_error N)__name__
__module____qualname____firstlineno____annotations____static_attributes__r%       Z/var/www/html/shao/venv/lib/python3.13/site-packages/langchain_openai/chat_models/azure.pyr    r       s    	%%**r,   r    c                F    [        U [        5      =(       a    [        U 5      $ N)
isinstancetyper   )objs    r-   _is_pydantic_classr3   %   s    c4 ?%:3%??r,   c                    ^  \ rS rSr% Sr\" \" SSS9S9rS\S'    \" SS	S
9r	S\S'    \" S\" SSS9S9r
S\S'    \" S\" SS/SS9S9rS\S'    \" \" SSS9S9rS\S'    SrS\S'    SrS\S'    SrS\S'    \" \" SS S9S9rS\S!'    S"rS#\S$'    \" SS%S
9rS\S&'    \" SS9rS'\S('    \S;S) j5       r\S<S* j5       r\S=S+ j5       r\" S,S-9S>S. j5       r\S?U 4S/ jj5       r\S@S0 j5       r\S?S1 j5       r SA     SBU 4S2 jjjr SA     SCU 4S3 jjjr SDU 4S4 jjr!      SEU 4S5 jjr" SAS6S7SS8.           SFU 4S9 jjjjr#S:r$U =r%$ )GAzureChatOpenAI)   u5  Azure OpenAI chat model integration.

Setup:
    Head to the Azure `OpenAI quickstart guide <https://learn.microsoft.com/en-us/azure/ai-foundry/openai/chatgpt-quickstart?tabs=keyless%2Ctypescript-keyless%2Cpython-new%2Ccommand-line&pivots=programming-language-python>`__
    to create your Azure OpenAI deployment.

    Then install ``langchain-openai`` and set environment variables
    ``AZURE_OPENAI_API_KEY`` and ``AZURE_OPENAI_ENDPOINT``:

    .. code-block:: bash

        pip install -U langchain-openai

        export AZURE_OPENAI_API_KEY="your-api-key"
        export AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/"

Key init args — completion params:
    azure_deployment: str
        Name of Azure OpenAI deployment to use.
    temperature: float
        Sampling temperature.
    max_tokens: Optional[int]
        Max number of tokens to generate.
    logprobs: Optional[bool]
        Whether to return logprobs.

Key init args — client params:
    api_version: str
        Azure OpenAI REST API version to use (distinct from the version of the
        underlying model). `See more on the different versions. <https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning>`__
    timeout: Union[float, Tuple[float, float], Any, None]
        Timeout for requests.
    max_retries: Optional[int]
        Max number of retries.
    organization: Optional[str]
        OpenAI organization ID. If not passed in will be read from env
        var ``OPENAI_ORG_ID``.
    model: Optional[str]
        The name of the underlying OpenAI model. Used for tracing and token
        counting. Does not affect completion. E.g. ``'gpt-4'``, ``'gpt-35-turbo'``, etc.
    model_version: Optional[str]
        The version of the underlying OpenAI model. Used for tracing and token
        counting. Does not affect completion. E.g., ``'0125'``, ``'0125-preview'``, etc.

See full list of supported init args and their descriptions in the params section.

Instantiate:
    .. code-block:: python

        from langchain_openai import AzureChatOpenAI

        llm = AzureChatOpenAI(
            azure_deployment="your-deployment",
            api_version="2024-05-01-preview",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            # organization="...",
            # model="gpt-35-turbo",
            # model_version="0125",
            # other params...
        )

.. note::
    Any param which is not explicitly supported will be passed directly to the
    ``openai.AzureOpenAI.chat.completions.create(...)`` API every time to the model is
    invoked.

    For example:

    .. code-block:: python

        from langchain_openai import AzureChatOpenAI
        import openai

        AzureChatOpenAI(..., logprobs=True).invoke(...)

        # results in underlying API call of:

        openai.AzureOpenAI(..).chat.completions.create(..., logprobs=True)

        # which is also equivalent to:

        AzureChatOpenAI(...).invoke(..., logprobs=True)

Invoke:
    .. code-block:: python

        messages = [
            (
                "system",
                "You are a helpful translator. Translate the user sentence to French.",
            ),
            ("human", "I love programming."),
        ]
        llm.invoke(messages)

    .. code-block:: python

        AIMessage(
            content="J'adore programmer.",
            usage_metadata={
                "input_tokens": 28,
                "output_tokens": 6,
                "total_tokens": 34,
            },
            response_metadata={
                "token_usage": {
                    "completion_tokens": 6,
                    "prompt_tokens": 28,
                    "total_tokens": 34,
                },
                "model_name": "gpt-4",
                "system_fingerprint": "fp_7ec89fabc6",
                "prompt_filter_results": [
                    {
                        "prompt_index": 0,
                        "content_filter_results": {
                            "hate": {"filtered": False, "severity": "safe"},
                            "self_harm": {"filtered": False, "severity": "safe"},
                            "sexual": {"filtered": False, "severity": "safe"},
                            "violence": {"filtered": False, "severity": "safe"},
                        },
                    }
                ],
                "finish_reason": "stop",
                "logprobs": None,
                "content_filter_results": {
                    "hate": {"filtered": False, "severity": "safe"},
                    "self_harm": {"filtered": False, "severity": "safe"},
                    "sexual": {"filtered": False, "severity": "safe"},
                    "violence": {"filtered": False, "severity": "safe"},
                },
            },
            id="run-6d7a5282-0de0-4f27-9cc0-82a9db9a3ce9-0",
        )

Stream:
    .. code-block:: python

        for chunk in llm.stream(messages):
            print(chunk.text(), end="")

    .. code-block:: python

        AIMessageChunk(content="", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(content="J", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(content="'", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(content="ad", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(content="ore", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(content=" la", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(
            content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
        )
        AIMessageChunk(
            content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
        )
        AIMessageChunk(content=".", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
        AIMessageChunk(
            content="",
            response_metadata={
                "finish_reason": "stop",
                "model_name": "gpt-4",
                "system_fingerprint": "fp_811936bd4f",
            },
            id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f",
        )

    .. code-block:: python

        stream = llm.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
        full

    .. code-block:: python

        AIMessageChunk(
            content="J'adore la programmation.",
            response_metadata={
                "finish_reason": "stop",
                "model_name": "gpt-4",
                "system_fingerprint": "fp_811936bd4f",
            },
            id="run-ba60e41c-9258-44b8-8f3a-2f10599643b3",
        )

Async:
    .. code-block:: python

        await llm.ainvoke(messages)

        # stream:
        # async for chunk in (await llm.astream(messages))

        # batch:
        # await llm.abatch([messages])

Tool calling:
    .. code-block:: python

        from pydantic import BaseModel, Field


        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(
                ..., description="The city and state, e.g. San Francisco, CA"
            )


        class GetPopulation(BaseModel):
            '''Get the current population in a given location'''

            location: str = Field(
                ..., description="The city and state, e.g. San Francisco, CA"
            )


        llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
        ai_msg = llm_with_tools.invoke(
            "Which city is hotter today and which is bigger: LA or NY?"
        )
        ai_msg.tool_calls

    .. code-block:: python

        [
            {
                "name": "GetWeather",
                "args": {"location": "Los Angeles, CA"},
                "id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
            },
            {
                "name": "GetWeather",
                "args": {"location": "New York, NY"},
                "id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
            },
            {
                "name": "GetPopulation",
                "args": {"location": "Los Angeles, CA"},
                "id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
            },
            {
                "name": "GetPopulation",
                "args": {"location": "New York, NY"},
                "id": "call_6ghfKxV264jEfe1mRIkS3PE7",
            },
        ]

Structured output:
    .. code-block:: python

        from typing import Optional

        from pydantic import BaseModel, Field


        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")
            rating: Optional[int] = Field(
                description="How funny the joke is, from 1 to 10"
            )


        structured_llm = llm.with_structured_output(Joke)
        structured_llm.invoke("Tell me a joke about cats")

    .. code-block:: python

        Joke(
            setup="Why was the cat sitting on the computer?",
            punchline="To keep an eye on the mouse!",
            rating=None,
        )

    See ``AzureChatOpenAI.with_structured_output()`` for more.

JSON mode:
    .. code-block:: python

        json_llm = llm.bind(response_format={"type": "json_object"})
        ai_msg = json_llm.invoke(
            "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
        )
        ai_msg.content

    .. code-block:: python

        '\n{\n  "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\n}'

Image input:
    .. code-block:: python

        import base64
        import httpx
        from langchain_core.messages import HumanMessage

        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        message = HumanMessage(
            content=[
                {"type": "text", "text": "describe the weather in this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                },
            ]
        )
        ai_msg = llm.invoke([message])
        ai_msg.content

    .. code-block:: python

        "The weather in the image appears to be quite pleasant. The sky is mostly clear"

Token usage:
    .. code-block:: python

        ai_msg = llm.invoke(messages)
        ai_msg.usage_metadata

    .. code-block:: python

        {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

Logprobs:
    .. code-block:: python

        logprobs_llm = llm.bind(logprobs=True)
        ai_msg = logprobs_llm.invoke(messages)
        ai_msg.response_metadata["logprobs"]

    .. code-block:: python

        {
            "content": [
                {
                    "token": "J",
                    "bytes": [74],
                    "logprob": -4.9617593e-06,
                    "top_logprobs": [],
                },
                {
                    "token": "'adore",
                    "bytes": [39, 97, 100, 111, 114, 101],
                    "logprob": -0.25202933,
                    "top_logprobs": [],
                },
                {
                    "token": " la",
                    "bytes": [32, 108, 97],
                    "logprob": -0.20141791,
                    "top_logprobs": [],
                },
                {
                    "token": " programmation",
                    "bytes": [
                        32,
                        112,
                        114,
                        111,
                        103,
                        114,
                        97,
                        109,
                        109,
                        97,
                        116,
                        105,
                        111,
                        110,
                    ],
                    "logprob": -1.9361265e-07,
                    "top_logprobs": [],
                },
                {
                    "token": ".",
                    "bytes": [46],
                    "logprob": -1.2233183e-05,
                    "top_logprobs": [],
                },
            ]
        }

Response metadata
    .. code-block:: python

        ai_msg = llm.invoke(messages)
        ai_msg.response_metadata

    .. code-block:: python

        {
            "token_usage": {
                "completion_tokens": 6,
                "prompt_tokens": 28,
                "total_tokens": 34,
            },
            "model_name": "gpt-35-turbo",
            "system_fingerprint": None,
            "prompt_filter_results": [
                {
                    "prompt_index": 0,
                    "content_filter_results": {
                        "hate": {"filtered": False, "severity": "safe"},
                        "self_harm": {"filtered": False, "severity": "safe"},
                        "sexual": {"filtered": False, "severity": "safe"},
                        "violence": {"filtered": False, "severity": "safe"},
                    },
                }
            ],
            "finish_reason": "stop",
            "logprobs": None,
            "content_filter_results": {
                "hate": {"filtered": False, "severity": "safe"},
                "self_harm": {"filtered": False, "severity": "safe"},
                "sexual": {"filtered": False, "severity": "safe"},
                "violence": {"filtered": False, "severity": "safe"},
            },
        }

AZURE_OPENAI_ENDPOINTN)default)default_factoryzOptional[str]azure_endpointazure_deployment)r8   aliaszUnion[str, None]deployment_nameapi_versionOPENAI_API_VERSION)r<   r9   openai_api_versionapi_keyAZURE_OPENAI_API_KEYOPENAI_API_KEYzOptional[SecretStr]openai_api_keyAZURE_OPENAI_AD_TOKENazure_ad_tokenzUnion[Callable[[], str], None]azure_ad_token_providerz)Union[Callable[[], Awaitable[str]], None]azure_ad_async_token_provider strmodel_versionOPENAI_API_TYPEazureopenai_api_typeTboolvalidate_base_urlmodel
model_namezOptional[dict[str, Any]]disabled_paramsc                
    / SQ$ )z*Get the namespace of the langchain object.)	langchainchat_modelsazure_openair%   clss    r-   get_lc_namespace AzureChatOpenAI.get_lc_namespaceE  s
     <;r,   c                    SSS.$ )NrB   rE   )rD   rF   r%   selfs    r-   
lc_secretsAzureChatOpenAI.lc_secretsJ  s     55
 	
r,   c                    g)NTr%   rX   s    r-   is_lc_serializable"AzureChatOpenAI.is_lc_serializableQ  s    r,   after)modec                   U R                   b  U R                   S:  a  [        S5      eU R                   b,  U R                   S:  a  U R                  (       a  [        S5      eU R                  c+  U R                  (       a  U R                  S:X  a  O	SS0U l        U R
                  =(       d3    [        R                  " S5      =(       d    [        R                  " S5      U l        U R                  nU(       a>  U R                  (       a-  S	U;  a  [        S
5      eU R                  (       a  [        S5      eU R                  U R                  U R                  U R                  (       a  U R                  R                  5       OSU R                  (       a  U R                  R                  5       OSU R                   U R
                  U R                  U R"                  0 U R$                  =(       d    0 ESS0EU R&                  S.nU R(                  b  U R(                  US'   U R*                  (       dP  SU R,                  0n[.        R0                  " S0 UDUD6U l        U R2                  R4                  R6                  U l        U R8                  (       dp  SU R:                  0nU R<                  (       a  U R<                  US'   [.        R>                  " S0 UDUD6U l         U R@                  R4                  R6                  U l        U $ )z?Validate that api key and python package exists in environment.N   zn must be at least 1.zn must be 1 when streaming.zgpt-4oparallel_tool_callsOPENAI_ORG_IDOPENAI_ORGANIZATIONz/openaizAs of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`).a  As of openai>=1.0.0, if `azure_deployment` (or alias `deployment_name`) is specified then `base_url` (or alias `openai_api_base`) should not be. If specifying `azure_deployment`/`deployment_name` then use `azure_endpoint` instead of `base_url`.

For example, you could specify:

azure_endpoint="https://xxx.openai.azure.com/", azure_deployment="my-deployment"

Or you can equivalently specify:

base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"z
User-Agentz%langchain-partner-python-azure-openai)r>   r:   r;   rA   rF   rG   organizationbase_urltimeoutdefault_headersdefault_querymax_retrieshttp_clientrG   r%   )!n
ValueError	streamingrS   rR   openai_organizationosgetenvopenai_api_baserP   r=   r@   r:   rD   get_secret_valuerF   rG   request_timeoutrn   ro   rp   clientrq   openaiAzureOpenAIroot_clientchatcompletionsasync_clienthttp_async_clientrH   AsyncAzureOpenAIroot_async_client)r^   rx   client_paramssync_specificasync_specifics        r-   validate_environment$AzureChatOpenAI.validate_environmentU  s    66$&&1*455VVDFFQJ4>>:;;'4??h#>(=t'D$ $$ 0yy)0yy./ 	  ..t55/ - 
 ## 	_   22"11 $ 4 4:>:M:M##446SW ;?:M:M##446SW'+'C'C 44,,++ ''-2 E  "//%
( '+/+;+;M-({{*D,<,<=M%11SMS]SD**//;;DK  +T-C-CDN1166 78 &,%<%< && &D" !% 6 6 ; ; G GDr,   c                8   > 0 SU R                   0E[        TU ]  E$ )zGet the identifying parameters.r;   )r=   super_identifying_params)r^   	__class__s    r-   r   #AzureChatOpenAI._identifying_params  s-    
!4#7#78
g)
 	
r,   c                    g)Nzazure-openai-chatr%   r]   s    r-   	_llm_typeAzureChatOpenAI._llm_type  s    "r,   c                4    U R                   U R                  S.$ )NrN   r@   r   r]   s    r-   lc_attributesAzureChatOpenAI.lc_attributes  s       $33"&"9"9
 	
r,   c                r  > [         TU ]  " SSU0UD6nSUS'   U R                  (       am  U R                  (       aJ  U R                  U R                  ;  a0  U R                  S-   U R                  R	                  S5      -   US'   U$ U R                  US'    U$ U R
                  (       a  U R
                  US'   U$ )z,Get the parameters used to invoke the model.stoprM   ls_provider-ls_model_namer%   )r   _get_ls_paramsrR   rK   lstripr=   )r^   r   kwargsparamsr   s       r-   r   AzureChatOpenAI._get_ls_params  s     '<T<V< '}??!!d&8&8&OOOc)D,>,>,E,Ec,JJ '  +///'  !!&*&:&:F?#r,   c                  > [         TU ]  X5      n[        U[        5      (       d  UR	                  5       nUS    H$  nUR                  SS 5      S:X  d  M  [        S5      e   SU;   aO  US   nU R                  (       a  U SU R                   3nUR                  =(       d    0 Ul        XSR                  S'   SU;   a,  UR                  =(       d    0 Ul        US   UR                  S'   [        UR                  US   5       H>  u  pgUR                  =(       d    0 Ul        UR                  S	0 5      UR                  S	'   M@     U$ )
Nchoicesfinish_reasoncontent_filterzKAzure has not provided the response due to a content filter being triggeredrQ   r   rR   prompt_filter_resultscontent_filter_results)r   _create_chat_resultr0   dict
model_dumpgetrs   rK   
llm_outputzipgenerationsgeneration_info)	r^   responser   chat_resultresrQ   chat_genresponse_choicer   s	           r-   r   #AzureChatOpenAI._create_chat_result  sP   
 g1(L(D))**,HI&Cww-1AA &  ' hW%E!! '4#5#5"67%0%;%;%ArK"38""<0"h.%0%;%;%ArK">F'?K""#:; *-##Xi%8*
%H (0'?'?'E2H$APATAT("BH$$%=>	*
 r,   c                   > U R                  0 UEU R                  E5      (       a  [        TU ]  " U0 UD6$ [        TU ]  " U0 UD6$ )+Route to Chat Completions or Responses API.)_use_responses_apimodel_kwargsr   _stream_responses_stream)r^   argsr   r   s      r-   r   AzureChatOpenAI._stream  sN    ""#Bf#B0A0A#BCC7,d=f==7?D3F33r,   c                  >#    U R                  0 UEU R                  E5      (       a   [        TU ]  " U0 UD6  Sh  vN nU7v   M  [        TU ]  " U0 UD6  Sh  vN nU7v   M   N+
 g N
 g7f)r   N)r   r   r   _astream_responses_astream)r^   r   r   chunkr   s       r-   r   AzureChatOpenAI._astream  sv      ""#Bf#B0A0A#BCC$w94J6J e$w/@@ e	J@sJ   5A0A*A(A* A0A.A,A. A0(A**A0,A..A0json_schemaFmethodinclude_rawstrictc               ,   > [         TU ]  " U4X#US.UD6$ )a8  Model wrapper that returns outputs formatted to match the given schema.

Args:
    schema: The output schema. Can be passed in as:

        - a JSON Schema,
        - a TypedDict class,
        - or a Pydantic class,
        - an OpenAI function/tool schema.

        If ``schema`` is a Pydantic class then the model output will be a
        Pydantic instance of that class, and the model-generated fields will be
        validated by the Pydantic class. Otherwise the model output will be a
        dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
        for more on how to properly specify types and descriptions of
        schema fields when specifying a Pydantic or TypedDict class.

    method: The method for steering model generation, one of:

        - ``'json_schema'``:
            Uses OpenAI's `Structured Output API <https://platform.openai.com/docs/guides/structured-outputs>`__.
            Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later
            models.
        - ``'function_calling'``:
            Uses OpenAI's tool-calling (formerly called function calling)
            `API <https://platform.openai.com/docs/guides/function-calling>`__
        - ``'json_mode'``:
            Uses OpenAI's `JSON mode <https://platform.openai.com/docs/guides/structured-outputs/json-mode>`__.
            Note that if using JSON mode then you must include instructions for
            formatting the output into the desired schema into the model call

        Learn more about the differences between the methods and which models
        support which methods `here <https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format>`__.

    include_raw:
        If False then only the parsed structured output is returned. If
        an error occurs during model output parsing it will be raised. If True
        then both the raw model response (a BaseMessage) and the parsed model
        response will be returned. If an error occurs during output parsing it
        will be caught and returned as well. The final output is always a dict
        with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
    strict:

        - True:
            Model output is guaranteed to exactly match the schema.
            The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas?api-mode=responses#supported-schemas>`__.
        - False:
            Input schema will not be validated and model output will not be
            validated.
        - None:
            ``strict`` argument will not be passed to the model.

        If schema is specified via TypedDict or JSON schema, ``strict`` is not
        enabled by default. Pass ``strict=True`` to enable it.

        .. note:
            ``strict`` can only be non-null if ``method`` is
            ``'json_schema'`` or ``'function_calling'``.
    tools:
        A list of tool-like objects to bind to the chat model. Requires that:

        - ``method`` is ``'json_schema'`` (default).
        - ``strict=True``
        - ``include_raw=True``

        If a model elects to call a
        tool, the resulting ``AIMessage`` in ``'raw'`` will include tool calls.

        .. dropdown:: Example

            .. code-block:: python

                from langchain.chat_models import init_chat_model
                from pydantic import BaseModel


                class ResponseSchema(BaseModel):
                    response: str


                def get_weather(location: str) -> str:
                    """Get weather at a location."""
                    pass

                llm = init_chat_model("openai:gpt-4o-mini")

                structured_llm = llm.with_structured_output(
                    ResponseSchema,
                    tools=[get_weather],
                    strict=True,
                    include_raw=True,
                )

                structured_llm.invoke("What's the weather in Boston?")

            .. code-block:: python

                {
                    "raw": AIMessage(content="", tool_calls=[...], ...),
                    "parsing_error": None,
                    "parsed": None,
                }

    kwargs: Additional keyword args are passed through to the model.

Returns:
    A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

    If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
    an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

    If ``include_raw`` is True, then Runnable outputs a dict with keys:

    - ``'raw'``: BaseMessage
    - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
    - ``'parsing_error'``: Optional[BaseException]

.. versionchanged:: 0.1.20

    Added support for TypedDict class ``schema``.

.. versionchanged:: 0.1.21

    Support for ``strict`` argument added.
    Support for ``method="json_schema"`` added.

.. versionchanged:: 0.3.0

    ``method`` default changed from "function_calling" to "json_schema".

.. versionchanged:: 0.3.12
    Support for ``tools`` added.

.. versionchanged:: 0.3.21
    Pass ``kwargs`` through to the model.

.. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False, strict=True

    Note, OpenAI has a number of restrictions on what types of schemas can be
    provided if ``strict`` = True. When using Pydantic, our model cannot
    specify any Field metadata (like min/max constraints) and fields cannot
    have default values.

    See all constraints `here <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.

    .. code-block:: python

        from typing import Optional

        from langchain_openai import AzureChatOpenAI
        from pydantic import BaseModel, Field


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: Optional[str] = Field(
                default=..., description="A justification for the answer."
            )


        llm = AzureChatOpenAI(
            azure_deployment="...", model="gpt-4o", temperature=0
        )
        structured_llm = llm.with_structured_output(AnswerWithJustification)

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )

.. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False, strict=False

    .. code-block:: python

        from typing import Optional

        from langchain_openai import AzureChatOpenAI
        from pydantic import BaseModel, Field


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: Optional[str] = Field(
                default=..., description="A justification for the answer."
            )


        llm = AzureChatOpenAI(
            azure_deployment="...", model="gpt-4o", temperature=0
        )
        structured_llm = llm.with_structured_output(
            AnswerWithJustification, method="function_calling"
        )

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )

.. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True

    .. code-block:: python

        from langchain_openai import AzureChatOpenAI
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        llm = AzureChatOpenAI(
            azure_deployment="...", model="gpt-4o", temperature=0
        )
        structured_llm = llm.with_structured_output(
            AnswerWithJustification, include_raw=True
        )

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
        #     'parsing_error': None
        # }

.. dropdown:: Example: schema=TypedDict class, method="json_schema", include_raw=False, strict=False

    .. code-block:: python

        from typing_extensions import Annotated, TypedDict

        from langchain_openai import AzureChatOpenAI


        class AnswerWithJustification(TypedDict):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: Annotated[
                Optional[str], None, "A justification for the answer."
            ]


        llm = AzureChatOpenAI(
            azure_deployment="...", model="gpt-4o", temperature=0
        )
        structured_llm = llm.with_structured_output(AnswerWithJustification)

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }

.. dropdown:: Example: schema=OpenAI function schema, method="json_schema", include_raw=False

    .. code-block:: python

        from langchain_openai import AzureChatOpenAI

        oai_schema = {
            'name': 'AnswerWithJustification',
            'description': 'An answer to the user question along with justification for the answer.',
            'parameters': {
                'type': 'object',
                'properties': {
                    'answer': {'type': 'string'},
                    'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                },
               'required': ['answer']
           }
       }

        llm = AzureChatOpenAI(
            azure_deployment="...",
            model="gpt-4o",
            temperature=0,
        )
        structured_llm = llm.with_structured_output(oai_schema)

        structured_llm.invoke(
            "What weighs more a pound of bricks or a pound of feathers"
        )
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }

.. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True

    .. code-block::

        from langchain_openai import AzureChatOpenAI
        from pydantic import BaseModel

        class AnswerWithJustification(BaseModel):
            answer: str
            justification: str

        llm = AzureChatOpenAI(
            azure_deployment="...",
            model="gpt-4o",
            temperature=0,
        )
        structured_llm = llm.with_structured_output(
            AnswerWithJustification,
            method="json_mode",
            include_raw=True
        )

        structured_llm.invoke(
            "Answer the following question. "
            "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
            "What's heavier a pound of bricks or a pound of feathers?"
        )
        # -> {
        #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
        #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
        #     'parsing_error': None
        # }

.. dropdown:: Example: schema=None, method="json_mode", include_raw=True

    .. code-block::

        structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)

        structured_llm.invoke(
            "Answer the following question. "
            "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
            "What's heavier a pound of bricks or a pound of feathers?"
        )
        # -> {
        #     'raw': AIMessage(content='{\n    "answer": "They are both the same weight.",\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
        #     'parsed': {
        #         'answer': 'They are both the same weight.',
        #         'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
        #     },
        #     'parsing_error': None
        # }

r   )r   with_structured_output)r^   schemar   r   r   r   r   s         r-   r   &AzureChatOpenAI.with_structured_output  s-    d w-
!6
MS
 	
r,   )r   r{   rS   ru   r   r~   )returnz	list[str])r   zdict[str, str])r   rO   )r   r   )r   zdict[str, Any])r   rJ   r/   )r   zOptional[list[str]]r   r   r   r   )r   zUnion[dict, openai.BaseModel]r   zOptional[dict]r   r   )r   r   r   r   r   zIterator[ChatGenerationChunk])r   r   r   r   r   z"AsyncIterator[ChatGenerationChunk])r   zOptional[_DictOrPydanticClass]r   z7Literal['function_calling', 'json_mode', 'json_schema']r   rO   r   zOptional[bool]r   r   r   z-Runnable[LanguageModelInput, _DictOrPydantic])&r&   r'   r(   r)   __doc__r   r   r:   r*   r=   r@   r   rD   rF   rG   rH   rK   rN   rP   rR   rS   classmethodrZ   propertyr_   rb   r   r   r   r   r   r   r   r   r   r   r+   __classcell__)r   s   @r-   r5   r5   )   s   l\ %* !8$G%NM  ).dBT(UO%U ). !5tD)  V +0'#%56
+N'  X*/'(?N+N'  ?C;B PT!#LS
 M3
 &+ !2GD&O]  0"t" !&d' BJB 16d0CO-C* < < 
 
   '"S #Sj 
 
 # # 
 
 +/':=	 ( +/$/$ ($ 
	$ $L4		$'		+	 26t
 KX!!%t
.t
 H	t

 t
 t
 t
 
7t
 t
r,   r5   )r2   r   r   rO   )7r   
__future__r   loggingrv   collections.abcr   r   r   typingr   r   r	   r
   r   r   r|   langchain_core.language_modelsr   *langchain_core.language_models.chat_modelsr   langchain_core.messagesr   langchain_core.outputsr   r   langchain_core.runnablesr   langchain_core.utilsr   r   langchain_core.utils.pydanticr   pydanticr   r   r   r   typing_extensionsr   r   !langchain_openai.chat_models.baser   	getLoggerr&   loggerr   r   rJ   r1   _DictOrPydanticClass_DictOrPydanticr    r3   r5   r%   r,   r-   <module>r      s      "  	 > > E E  = F / B - : ? A A + <			8	$ e9%T#s(^T#Y67 c	"+Y +@R
n R
r,   