import os
import sys
import threading
from datetime import datetime

from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.prompts import ChatPromptTemplate
from langchain.tools import tool
from langchain_openai import ChatOpenAI

sys.path.append(r".")
from configs.config import OPENAI_MODEL_4O, OPENAI_MODEL_MINI
from utils.researcher.perplexity import PerplexityResearch

# Load OPENAI_API_KEY / PERPLEXITY_KEY (and friends) from a local .env file.
load_dotenv()

# Per-thread scratchpad: the agent's tools append their findings to
# `thread_local_data.research_text_combined`, so concurrent Researcher
# instances running on different threads do not interleave their research.
thread_local_data = threading.local()


class Researcher:
    """LLM research agent.

    Orchestrates a tool-calling agent that searches the web via Perplexity,
    verifies and fills out its research, and finally summarizes everything
    gathered into an answer for the initial question.  Research gathered by
    the tools accumulates in thread-local storage (see ``thread_local_data``).
    """

    # NOTE(review): the names are misleading — ``llm4o`` is actually built
    # from OPENAI_MODEL_MINI and ``llm4`` from OPENAI_MODEL_4O.  Kept as-is
    # because external code may reference these attributes by name.
    llm4o = ChatOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"), model=OPENAI_MODEL_MINI, temperature=0.1
    )

    llm4 = ChatOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"), model=OPENAI_MODEL_4O, temperature=0.5
    )

    perplexity_key = os.getenv("PERPLEXITY_KEY")

    # Tuning knobs (not all are used in this file — presumably consumed by
    # callers or subclasses; TODO confirm).
    max_number_of_questions = 3
    summary_token_limit = 1000
    max_follow_up_level = 2  # maximum depth of follow-up research

    # NOTE(review): class-level mutable dict — shared by ALL instances and
    # apparently unused in this file.  Move into __init__ if per-instance
    # link tracking is intended.
    links_researched = {}

    def __init__(
        self, parent_question: str = None, primary_research=None, persona: str = None
    ):
        """Store the question/persona and prime this thread's scratchpad.

        Args:
            parent_question (str, optional): The top-level question to research.
            primary_research (optional): Pre-existing research text to start
                from; falls back to the empty string.
            persona (str, optional): Short persona description; when given it
                is expanded into a full system-prompt persona via the LLM.
        """
        self.initial_question = parent_question
        self.primary_research = primary_research if primary_research else ""
        self.persona_input = persona
        self.persona_extended = self.build_persona() if persona else None

        # Make sure this thread has a research scratchpad.
        if not hasattr(thread_local_data, "research_text_combined"):
            thread_local_data.research_text_combined = ""

    def build_persona(self):
        """Expand ``self.persona_input`` into a full persona / system prompt.

        Returns:
            str: The LLM-generated persona text.
        """
        # The user-supplied persona is injected as a template variable rather
        # than an f-string: ChatPromptTemplate parses braces in message
        # strings as variables, so literal ``{``/``}`` in the input would
        # otherwise raise a missing-variable error.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a skillful LLM user and understand how to create personas to the best of your ability",
                ),
                (
                    "human",
                    "Build a persona for {persona_input}. This should include a system prompt for an LLM.",
                ),
                ("system", "Only return the persona as string"),
            ]
        )
        chain = prompt | self.llm4o
        return chain.invoke({"persona_input": self.persona_input}).content

    def conduct_research(self):
        """Run the tool-calling agent, then summarize what it gathered.

        Returns:
            str: This thread's combined research text, ending with a
            "Final Output" section summarizing the initial question.
        """
        # conduct_research may run on a different thread than __init__, so
        # guard the thread-local scratchpad here as well.
        if not hasattr(thread_local_data, "research_text_combined"):
            thread_local_data.research_text_combined = ""

        # Dynamic content (persona, question, date) goes in as template
        # variables — f-strings would break if the content contained braces,
        # because ChatPromptTemplate parses braces as prompt variables.
        invoke_args = {
            "question": self.initial_question,
            "today": datetime.now().strftime("%Y-%m-%d"),
        }
        messages = []
        if self.persona_extended:
            messages.append(("system", "Your persona is: {persona}"))
            invoke_args["persona"] = self.persona_extended
        messages += [
            (
                "system",
                "You have access to tools which allow you to search the web, verify research, and fill out the research",
            ),
            (
                "system",
                "You should use the tools and your own knowledge to answer the question.",
            ),
            (
                "system",
                "For complex questions, you should use the create research plan tool.",
            ),
            (
                "system",
                "If you use the web search tool, then you must use the verify research tool",
            ),
            (
                "system",
                "If the research is not complete, then you must use the fill out research tool",
            ),
            ("system", "Make sure to return the citations when provided"),
            (
                "system",
                "*** Before you provide an answer, you must verify the research ***",
            ),
            (
                "human",
                "You are not allowed to exit the chain without providing the research and answers.",
            ),
            (
                "system",
                "When you pass in the persona, you should make it at least 100 tokens long.",
            ),
            (
                "system",
                "When you use the web search tool, make your questions as specific as you can. Get the most relevant information and up to date information.",
            ),
            ("system", "Today's date is: {today}"),
            (
                "human",
                "The question you are going to answer is: {question}. You must use the tools to answer the question",
            ),
            ("system", "{agent_scratchpad}"),
        ]

        tools = [
            self.web_search,
            self.verify_research,
            self.fill_out_research,
            self.create_research_plan,
        ]
        agent = create_tool_calling_agent(
            tools=tools,
            llm=self.llm4,
            prompt=ChatPromptTemplate.from_messages(messages),
        )
        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
        # The agent's direct answer is discarded: the tools have already
        # accumulated their findings in the thread-local scratchpad, which
        # is summarized below.  (NOTE(review): looks intentional — confirm.)
        agent_executor.invoke(invoke_args)

        # Summarize the accumulated research and append the final answer.
        summary = self.summarize_research(
            self.initial_question, thread_local_data.research_text_combined
        )
        thread_local_data.research_text_combined += (
            f"\n\n # Final Output for: {self.initial_question} \n {summary}"
        )

        return thread_local_data.research_text_combined

    def summarize_research(self, initial_question: str, research: str):
        """Summarize how the gathered research answers the question.

        Args:
            initial_question (str): The question being answered.
            research (str): The accumulated research text.

        Returns:
            str: An LLM-written answer grounded in the research.
        """
        # Question and research are template variables, not f-strings:
        # research text routinely contains braces that would otherwise be
        # parsed as (missing) prompt variables.
        messages = [
            (
                "system",
                "You are going to be provided with a question and associated research to answer the question.",
            ),
            (
                "system",
                "You need to understand the research and then answer how the question can be answered using the research.",
            ),
            ("human", "The question you are trying to answer: {question}"),
            ("human", "The research you have access to: {research}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        chain = prompt | Researcher.llm4o
        return chain.invoke(
            {"question": initial_question, "research": research}
        ).content

    @staticmethod
    @tool
    def web_search(question: str, persona: str = None):
        """Search the web for information.

        Args:
            question (str): The question to search for.
            persona (str, optional): The persona to use. Defaults to None.

        Returns:
            answer (dict): The answer to the question.
                answer: str, citations: list(url)
        """
        try:
            perplexity_research = PerplexityResearch()
            # These tuples go straight to the Perplexity client, not through
            # ChatPromptTemplate, so raw interpolation is safe here.
            messages = [("system", "You are a helpful research agent.")]
            if persona:
                messages.append(("system", persona))
            messages.append(("human", question))
            response = perplexity_research.ask_perplexity(messages)
            response_pretty = perplexity_research.pretty_print(response)

            # Tools can run on worker threads — make sure this thread's
            # scratchpad exists before appending to it.
            if not hasattr(thread_local_data, "research_text_combined"):
                thread_local_data.research_text_combined = ""

            thread_local_data.research_text_combined += (
                f"\n\n # {question} \n {response_pretty}"
            )
            return response_pretty
        except Exception as e:
            # Deliberate best-effort: surface the failure to the agent as a
            # recoverable message instead of aborting the whole chain.
            print(e)
            return "This search was unsuccessful, you can try a different question."

    @staticmethod
    @tool
    def verify_research(initial_question: str, research: str, persona: str = None):
        """Verify the research.

        Args:
            initial_question (str): The question to verify the research for.
            research (str): The research to verify.
            persona (str, optional): The persona to use. Defaults to None.

        Returns:
            str: The verification of the research. True or False and if False, an explanation as to why.
        """
        messages = [
            (
                "system",
                "You are going to be tasked with verifying a piece of research is complete.",
            ),
            (
                "system",
                "Your goal is to determine if the research answers the question.",
            ),
            (
                "system",
                "You will respond with True or False and if False, you will explain why.",
            ),
            (
                "system",
                "If you think the question can be answered more completely, then you should respond False and explain why.",
            ),
        ]
        # All dynamic content (persona included) is passed as template
        # variables so braces in it cannot break the prompt.
        invoke_args = {"initial_question": initial_question, "research": research}
        if persona:
            messages.append(("system", "{persona}"))
            invoke_args["persona"] = persona
        messages.append(
            ("human", "The question you are answering is: {initial_question}")
        )
        messages.append(("human", "The research you are verifying is: {research}"))
        prompt = ChatPromptTemplate.from_messages(messages)
        chain = prompt | Researcher.llm4o
        return chain.invoke(invoke_args).content

    @staticmethod
    @tool
    def fill_out_research(lacking_research: str, persona: str = None):
        """fill out the research

        This tool takes in the research that is currently lacking and fills it out with
        additional information. The tool should respond with a list of questions that
        can be asked to an LLM with sufficient information to get the correct answer.
        """
        messages = [
            (
                "system",
                "You will be provided with what the research is missing and you will fill it out.",
            ),
            (
                "system",
                "You should respond in such a way that another LLM can use the information to answer the question.",
            ),
            (
                "system",
                "You will respond with a list of questions that can be asked to an LLM with sufficient information to get the correct answer",
            ),
        ]
        # Dynamic content is passed via template variables (brace-safe).
        invoke_args = {"lacking_research": lacking_research}
        if persona:
            messages.append(("system", "{persona}"))
            invoke_args["persona"] = persona
        messages.append(
            (
                "human",
                "The current research is missing the following information: {lacking_research}",
            )
        )
        prompt = ChatPromptTemplate.from_messages(messages)
        chain = prompt | Researcher.llm4o
        return chain.invoke(invoke_args).content

    @staticmethod
    @tool
    def create_research_plan(question: str, persona: str = None):
        """Create a research plan.

        Args:
            question (str): The question to create a research plan for.
            persona (str, optional): The persona to use. Defaults to None.

        Returns:
            str: The research plan.
        """
        messages = [
            (
                "system",
                "You are going to be provided with a question and you will create a research plan to answer the question.",
            ),
            (
                "system",
                "You should respond with a step by step plan to comprehensively answer the question.",
            ),
            (
                "system",
                "You will respond with a list of questions that can be asked to an LLM with sufficient information to get the correct answer",
            ),
        ]
        # Dynamic content is passed via template variables (brace-safe).
        invoke_args = {"question": question}
        if persona:
            messages.append(("system", "{persona}"))
            invoke_args["persona"] = persona
        messages.append(("human", "The question is: {question}"))
        prompt = ChatPromptTemplate.from_messages(messages)
        chain = prompt | Researcher.llm4o
        chain_output = chain.invoke(invoke_args).content
        # Typo fix: "YOu" -> "You".
        return f"The research plan is: {chain_output}. You are now ready to use the web research tool to answer the question"


if __name__ == "__main__":
    # Quick manual smoke test: research a market-sizing question.
    question = "What is the TAM for the Contract Lifecycle Management market?"
    researcher = Researcher(parent_question=question, persona="Industry analyst")
    print(researcher.conduct_research())
