
"""Use a single chain to route an input to one of multiple llm chains."""

from __future__ import annotations

from typing import Any, Optional

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate

from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE


@deprecated(
    since="0.2.12",
    removal="1.0",
    message=(
        "Please see migration guide here for recommended implementation: "
        "https://python.langchain.com/docs/versions/migrating_chains/"
        "multi_prompt_chain/"
    ),
)
class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts.

This class is deprecated. See below for a replacement, which offers several
benefits, including streaming and batch support.

Below is an example implementation:

    .. code-block:: python

        from operator import itemgetter
        from typing import Literal

        from langchain_core.output_parsers import StrOutputParser
        from langchain_core.prompts import ChatPromptTemplate
        from langchain_core.runnables import RunnableConfig
        from langchain_openai import ChatOpenAI
        from langgraph.graph import END, START, StateGraph
        from typing_extensions import TypedDict

        llm = ChatOpenAI(model="gpt-4o-mini")

        # Define the prompts we will route to
        prompt_1 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are an expert on animals."),
                ("human", "{input}"),
            ]
        )
        prompt_2 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are an expert on vegetables."),
                ("human", "{input}"),
            ]
        )

        # Construct the chains we will route to. These format the input query
        # into the respective prompt, run it through a chat model, and cast
        # the result to a string.
        chain_1 = prompt_1 | llm | StrOutputParser()
        chain_2 = prompt_2 | llm | StrOutputParser()
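
        # Each branch is a Runnable in its own right; a direct call (with an
        # illustrative query) would look like:
        # chain_1.invoke({"input": "how many legs does a spider have?"})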


        # Next: define the chain that selects which branch to route to.
        # Here we will take advantage of tool-calling features to force
        # the output to select one of two desired branches.
        route_system = "Route the user's query to either the animal or vegetable expert."
        route_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", route_system),
                ("human", "{input}"),
            ]
        )


        # Define schema for output:
        class RouteQuery(TypedDict):
            """Route query to destination expert."""

            destination: Literal["animal", "vegetable"]


        route_chain = route_prompt | llm.with_structured_output(RouteQuery)
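        # route_chain maps a free-form query to a dict matching the RouteQuery
        # schema, e.g. {"destination": "vegetable"}.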


        # For LangGraph, we will define the state of the graph to hold the query,
        # destination, and final answer.
        class State(TypedDict):
            query: str
            destination: RouteQuery
            answer: str


        # We define functions for each node, including routing the query:
        async def route_query(state: State, config: RunnableConfig):
            destination = await route_chain.ainvoke(state["query"], config)
            return {"destination": destination}


        # And one node for each prompt
        async def prompt_1(state: State, config: RunnableConfig):
            return {"answer": await chain_1.ainvoke(state["query"], config)}


        async def prompt_2(state: State, config: RunnableConfig):
            return {"answer": await chain_2.ainvoke(state["query"], config)}


        # We then define logic that selects the prompt based on the classification
        def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
            if state["destination"] == "animal":
                return "prompt_1"
            else:
                return "prompt_2"


        # Finally, assemble the multi-prompt chain. This is a sequence of two steps:
        # 1) Select "animal" or "vegetable" via the route_chain, recording the
        # destination alongside the input query.
        # 2) Route the input query to chain_1 or chain_2, based on that
        # selection, recording the answer.
        graph = StateGraph(State)
        graph.add_node("route_query", route_query)
        graph.add_node("prompt_1", prompt_1)
        graph.add_node("prompt_2", prompt_2)

        graph.add_edge(START, "route_query")
        graph.add_conditional_edges("route_query", select_node)
        graph.add_edge("prompt_1", END)
        graph.add_edge("prompt_2", END)
        app = graph.compile()

        result = await app.ainvoke({"query": "what color are carrots"})
        print(result["destination"])
        print(result["answer"])
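
        # The compiled graph also provides the streaming and batch support
        # mentioned above. A minimal streaming sketch (stream_mode="updates"
        # yields each node's output as it completes):
        async for chunk in app.astream(
            {"query": "what color are carrots"}, stream_mode="updates"
        ):
            print(chunk)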
    """

    @property
    def output_keys(self) -> list[str]:
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: list[dict[str, str]],
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts."""
        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for p_info in prompt_infos:
            name = p_info["name"]
            prompt_template = p_info["prompt_template"]
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["input"]
            )
            chain = LLMChain(llm=llm, prompt=prompt)
            destination_chains[name] = chain
        _default_chain = default_chain or ConversationChain(llm=llm, output_key="text")
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )
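

# A minimal usage sketch for the from_prompts constructor above. The prompt
# names, descriptions, and templates are illustrative assumptions; each
# prompt_template must accept an {input} variable, and llm can be any
# BaseLanguageModel instance. Prefer the LangGraph approach shown in the
# class docstring for new code.
#
#     prompt_infos = [
#         {
#             "name": "animals",
#             "description": "Good for questions about animals",
#             "prompt_template": "You are an expert on animals.\n\n{input}",
#         },
#         {
#             "name": "vegetables",
#             "description": "Good for questions about vegetables",
#             "prompt_template": "You are an expert on vegetables.\n\n{input}",
#         },
#     ]
#     chain = MultiPromptChain.from_prompts(llm, prompt_infos)
#     chain.invoke({"input": "what color are carrots?"})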