
from collections.abc import Sequence
from typing import Optional

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool

from langchain.agents.format_scratchpad.openai_tools import (
    format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import (
    OpenAIToolsAgentOutputParser,
)


def create_openai_tools_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    strict: Optional[bool] = None,
) -> Runnable:
    """Create an agent that uses OpenAI tools.

Args:
    llm: LLM to use as the agent.
    tools: Tools this agent has access to.
    prompt: The prompt to use. See the Prompt section below for more on the
        expected input variables.
    strict: Optional flag forwarded to ``convert_to_openai_tool`` when binding
        the tools to the LLM; if True, each tool schema is marked for OpenAI's
        strict structured-output validation. Defaults to None.

Returns:
    A Runnable sequence representing an agent. It takes the same input
    variables as the prompt passed in, and returns as output either an
    AgentAction (one or more tool calls to run) or an AgentFinish.

Raises:
    ValueError: If the prompt is missing required variables.

Example:

    .. code-block:: python

        from langchain import hub
        from langchain_community.chat_models import ChatOpenAI
        from langchain.agents import AgentExecutor, create_openai_tools_agent

        prompt = hub.pull("hwchase17/openai-tools-agent")
        model = ChatOpenAI()
        tools = ...

        agent = create_openai_tools_agent(model, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools)

        agent_executor.invoke({"input": "hi"})

        # Using with chat history
        from langchain_core.messages import AIMessage, HumanMessage
        agent_executor.invoke(
            {
                "input": "what's my name?",
                "chat_history": [
                    HumanMessage(content="hi! my name is bob"),
                    AIMessage(content="Hello Bob! How can I assist you today?"),
                ],
            }
        )
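
    The agent runnable can also be invoked on its own for a single step
    (a sketch; ``AgentExecutor`` normally drives this loop and supplies
    ``intermediate_steps``):

    .. code-block:: python

        # One step: returns an AgentFinish, or a list of tool-calling
        # agent actions for the caller to execute and feed back in.
        step = agent.invoke({"input": "hi", "intermediate_steps": []})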

Prompt:

    The agent prompt must have an `agent_scratchpad` key that is a
        ``MessagesPlaceholder``. Intermediate agent actions and tool output
        messages will be passed in here.

    Here's an example:

    .. code-block:: python

        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful assistant"),
                MessagesPlaceholder("chat_history", optional=True),
                ("human", "{input}"),
                MessagesPlaceholder("agent_scratchpad"),
            ]
        )
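
    For contrast, a prompt without the ``agent_scratchpad`` placeholder fails
    fast (a sketch; ``model`` and ``tools`` are assumed to be defined as in
    the Example section above):

    .. code-block:: python

        bad_prompt = ChatPromptTemplate.from_messages([("human", "{input}")])

        # Raises ValueError: Prompt missing required variables: {'agent_scratchpad'}
        create_openai_tools_agent(model, tools, bad_prompt)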
    """
    missing_vars = {"agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
        raise ValueError(msg)

    # Bind the tool schemas to the model so it can emit OpenAI tool calls.
    llm_with_tools = llm.bind(
        tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
    )

    # Pipeline: inject the scratchpad (prior tool calls and their outputs as
    # messages), format the prompt, call the model, and parse its tool calls.
    return (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_to_openai_tool_messages(
                x["intermediate_steps"]
            )
        )
        | prompt
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )