"Class to run the LLM model"
import asyncio
import json
import logging
import os
import time
from datetime import datetime

import openai
from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI

from configs.config import OPENAI_TOKEN_LIMIT
from services.company_profile.data_classes.llm_results import LeadershipResults
from services.company_profile.data_classes.SIC_code import SICCode
from utils.check_token_limit import check_token_limit
from utils.load_sic_codes import load_sic_codes

load_dotenv()

# NOTE(review): imported *after* load_dotenv(), presumably so .env values are
# populated before configs.config resolves them at import time — TODO confirm.
from configs.config import OPENAI_API_KEY, OPENAI_MODEL_35

# SIC code lookup table, loaded once at module import and shared by all LLMChat instances.
sic_dict = load_sic_codes()


class LLMChat:
    """Wrapper around a LangChain ChatOpenAI client for company-profile tasks.

    Provides chunked summarization (``openai_request``), SIC-code
    classification (``product_to_sic``) and leadership roster mapping
    (``map_leadership``). A single ChatOpenAI client is created in
    ``__init__`` and reused by every request.
    """

    def __init__(self):
        """Create the shared ChatOpenAI client used by all requests."""
        self.llm = ChatOpenAI(
            temperature=0.1, model_name=OPENAI_MODEL_35, api_key=OPENAI_API_KEY
        )

    async def openai_request(self, page_text, openai_prompt):
        """Request a summary from the OpenAI API, chunk by chunk.

        Args:
            page_text: A single string (split into token-limited chunks) or
                an iterable of pre-chunked strings.
            openai_prompt: Instruction message appended after each chunk.

        Returns:
            str: The response text for the last chunk. Each chunk is seeded
            with the previous chunk's summary, so the final response is the
            cumulative summary. Empty string if no chunk produced a response.
        """
        if isinstance(page_text, str):
            # Cap each chunk at half the model's token limit so the prompt
            # and system messages fit in the remaining budget.
            page_text = check_token_limit(page_text, int(OPENAI_TOKEN_LIMIT / 2.0))

        response_text = ""
        seed_text = None  # running summary, fed back in on subsequent chunks

        for pg_text in page_text:
            try:
                response = await self.openai_request_call(
                    pg_text, openai_prompt, seed_text
                )
            except openai.RateLimitError as e:
                logging.error(
                    "%s:OpenAI Rate Limit Error: %s",
                    # Fixed: %D means %m/%d/%y in strftime; %d is day-of-month.
                    datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
                    e,
                )
                # Non-blocking back-off: time.sleep() here would stall the
                # entire event loop while waiting out the rate limit.
                await asyncio.sleep(20)
                response = await self.openai_request_call(
                    pg_text, openai_prompt, seed_text
                )
            if response is not None:
                response_text = response.content
                seed_text = response_text

        return response_text

    async def openai_request_call(self, page_text, openai_prompt, seed_text=None):
        """Make one OpenAI call for the given text.

        Args:
            page_text: The chunk of text to summarize.
            openai_prompt: Instruction message appended after the text.
            seed_text: Summary from a previous chunk, if any; the model is
                asked to extend it rather than start over.

        Returns:
            The LangChain message response, or None on failure.

        Raises:
            openai.RateLimitError: Re-raised so the caller's back-off/retry
                logic in ``openai_request`` can handle it.
        """
        try:
            prompt_temp = [
                (
                    "system",
                    "You are an expert consultant. Step 1 based on the text, become an expert in that company's industry and then follow the instructions below.",
                ),
                (
                    "system",
                    "Your responses should be formatted in bullets each 20 tokens or less.",
                ),
                (
                    "system",
                    "You will communicate in a formal tone, ensuring clear, succinct and professional dialogue. It will provide communication with precision, avoiding colloquial language and maintaining a serious demeanor.",
                ),
                ("human", "{page_text}"),
                ("human", openai_prompt),
            ]
            if seed_text:
                prompt_temp.append(
                    (
                        "user",
                        "You have already provided a summary here * {seed_text}*. Only provide a summary if you have more to add. Otherwise, return the same summary.",
                    )
                )

            prompt = ChatPromptTemplate.from_messages(prompt_temp)

            # Reuse the client built in __init__ instead of constructing a
            # fresh ChatOpenAI on every call.
            chain = prompt | self.llm
            response = await chain.ainvoke(
                {
                    "page_text": page_text,
                    "seed_text": seed_text,
                }
            )

            return response

        except openai.RateLimitError:
            # Previously swallowed by the broad handler below, which made the
            # retry logic in openai_request unreachable. Propagate it instead.
            raise
        except Exception as e:
            # Best-effort: callers treat None as "no summary for this chunk".
            logging.exception("OpenAI request failed: %s", e)
            return None

    async def product_to_sic(self, product_info: str):
        """Get the SIC code for the provided product description.

        Args:
            product_info: Free-text product description.

        Returns:
            The SIC code string extracted from the model's JSON response.

        Raises:
            ValueError: If ``product_info`` is None.
        """
        if product_info is None:
            raise ValueError("Product info is required")

        parser = JsonOutputParser(pydantic_object=SICCode)
        chat_template = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful AI bot in company SIC code analysis."),
                (
                    "system",
                    "Use these SIC codes and the related industry and sector. {sic_dict_json_string}",
                ),
                (
                    "system",
                    "Take in the product information and provide the SIC code for the company. Do not provide NAIC or other codes.",
                ),
                ("human", "Product Description: \n {product_info} \n"),
                ("human", "{JSONoutput}"),
            ]
        )

        chain = chat_template | self.llm | parser

        chain_response = await chain.ainvoke(
            {
                "sic_dict_json_string": json.dumps(sic_dict),
                "product_info": product_info,
                "JSONoutput": parser.get_format_instructions(),
            }
        )

        return chain_response["code"]

    async def map_leadership(self, company_leadership):
        """Map free-text leadership info to structured JSON using the LLM.

        Args:
            company_leadership: Free-text list of leader names/titles
                (optionally with LinkedIn links).

        Returns:
            Parsed JSON (per ``LeadershipResults``) with leader name, title
            and LinkedIn link entries.

        Raises:
            ValueError: If ``company_leadership`` is None.
        """
        if company_leadership is None:
            raise ValueError("Company leadership is required")

        parser = JsonOutputParser(pydantic_object=LeadershipResults)
        chat_template = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful AI bot in company leadership analysis."),
                (
                    "system",
                    """Given a list of leadership names and titles.\n
                    Return a structured output with leader names, titles and linkedin links if provided. If there are multiple linkedin links that match, choose the first one. If there isn't a match, leave it blank""",
                ),
                (
                    "system",
                    "Return a json list including the leader name, title, and linkedin link.",
                ),
                ("human", "Leadership: \n {company_leadership} \n"),
                (
                    "system",
                    "Structure the JSON output using this template. {JSONoutput}",
                ),
            ]
        )

        chain = chat_template | self.llm | parser

        chain_response = await chain.ainvoke(
            {
                "company_leadership": company_leadership,
                "JSONoutput": parser.get_format_instructions(),
            }
        )

        return chain_response


if __name__ == "__main__":
    # Ad-hoc smoke test: push a sample leadership roster through the mapper.
    print("hi")
    company_leadership = """
    Elon Musk: Founder and CEO
    - Elon Musk: CEO
    - Max Hodak: President
    - Vanessa Tolosa: Vice President of Engineering
    - Philip Sabes: Vice President of Neuroscience
    - Tim Hanson: Vice President of Manufacturing
    - Jared Birchall: Vice President of Legal Affairs
    - Steve Petrou: Vice President of Finance
    - DJ Seo: Vice President of Implant Systems
    - Ben Rapoport: Vice President of Hardware
    - Will Brandler: Vice President of Software
    - Eric Whitmire: Vice President of Clinical Research
    - Matthew MacDougall: Vice President of Business Development
    """
    chat_client = LLMChat()
    leadership_mapping = asyncio.run(chat_client.map_leadership(company_leadership))

    print(leadership_mapping)