"""Slide researcher: formats a slide's content into a structured JSON schema via LLM calls."""
# from utils.logger import ServiceLogger
import logging
import time

from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI

from configs.config import OPENAI_API_KEY, OPENAI_MODEL_MINI

XCM_logger = logging.getLogger()


class SlideResearch:
    """Conduct research on the slide content and format it to a target schema.

    Converts a slide's title/takeaway plus research data into the JSON
    structure described by ``parsing_class`` (a pydantic model), using a
    cheap LLM first and a stronger one as fallback.
    """

    # Primary, cheaper model — tried first on every attempt.
    llm_model3 = ChatOpenAI(
        model=OPENAI_MODEL_MINI, temperature=1, api_key=OPENAI_API_KEY
    )

    # Stronger fallback model, used only after the primary model fails.
    llm_model4 = ChatOpenAI(model="gpt-4o", temperature=1, api_key=OPENAI_API_KEY)

    # Token budgets interpolated into the system prompts.
    title_tokens = "10"
    text_tokens = "20-40"

    # Cap on format/parse attempts: the previous implementation retried
    # forever when the model kept returning unparseable output.
    max_attempts = 5

    def __init__(self, slide, primary_data, parsing_class, prompt_addition=None):
        """Initialize the slide research class.

        Args:
            slide: Object exposing ``.title`` and ``.content`` for the slide.
            primary_data: Research text the model should rely on first.
            parsing_class: Pydantic model describing the output JSON schema.
            prompt_addition: Optional extra human instruction appended to
                the prompt.
        """
        self.slide = slide
        self.primary_data = primary_data
        # self.secondary_data = secondary_data
        self.parsing_class = parsing_class
        self.prompt_addition = prompt_addition

    def _build_prompt(self):
        """Assemble the chat prompt used for the formatting request."""
        prompt_temp = [
            (
                "system",
                "You are an expert in converting the context provided and fit it within the format asked for",
            ),
            (
                "system",
                f"All titles should be {self.title_tokens} tokens or shorter",
            ),
            (
                "system",
                f"All helper text should be {self.text_tokens} tokens or shorter",
            ),
            ("human", "Slide title: {title}"),
            ("human", "Slide highlevel takeaway is: {content}"),
            (
                "human",
                """Slide research data to be used:
                    Rely on primary data here first: {primary_data} \n\n
                    Rely on secondary data here second if needed: {secondary_data}""",
            ),
            ("human", "convert to json format: {json_format}"),
            (
                "system",
                """Make the slide content specific to the company. If you don't have specific data points or information, you can use [XXX] as a placeholder Examples: 
                     """,
            ),
        ]

        # Graph-style layouts need numeric placeholders instead of [XXX].
        if "graph" in self.parsing_class.__name__:
            prompt_temp.append(
                (
                    "human",
                    "If you don't have actual numbers, use 0 as the placeholder for the data points.",
                )
            )

        if self.prompt_addition:
            prompt_temp.append(("human", self.prompt_addition))

        return ChatPromptTemplate.from_messages(prompt_temp)

    def _format(self):
        """Format the slide content into the schema of ``parsing_class``.

        Tries the cheap model first; on any failure falls back to the
        stronger model. Repeats up to ``max_attempts`` rounds, then raises.

        Returns:
            dict: The validated slide content.

        Raises:
            RuntimeError: If no attempt produced parseable output.
        """
        chat_prompt = self._build_prompt()
        parser = JsonOutputParser(pydantic_object=self.parsing_class)

        # The prompt variables are identical for every attempt — build once.
        payload = {
            "title": self.slide.title,
            "content": self.slide.content,
            "primary_data": self.primary_data,
            "secondary_data": "",
            "json_format": parser.get_format_instructions(),
        }

        for _ in range(self.max_attempts):
            # Cheap model first with a 5s backoff, then the stronger model
            # with a 10s backoff (mirrors the original sleep schedule).
            for model, backoff in ((self.llm_model3, 5), (self.llm_model4, 10)):
                try:
                    chain = chat_prompt | model | parser
                    chain_output = chain.invoke(payload)
                    # Round-trip through the pydantic class to validate the
                    # structure, then hand back a plain dict.
                    return self.parsing_class(**chain_output).dict()
                except Exception:
                    # The fallback invoke is now inside the try block, so a
                    # second failure retries instead of crashing the caller.
                    XCM_logger.error(
                        "Error in formatting the slide content.", exc_info=True
                    )
                    time.sleep(backoff)

        raise RuntimeError(
            f"Failed to format slide content after {self.max_attempts} attempts"
        )


# if __name__ == '__main__':

#     import pickle
#     with open("services/ppt_generator/slide.pkl", "rb") as file:
#         slide_data = pickle.load(file)


#     import inspect
#     import importlib
#     import sys
#     import json

#     sys.path.append(r".")

#     # from services.ppt_generator.data_classes import slide_layout_models
#     module = importlib.import_module("services.ppt_generator.data_classes.slide_layout_models")

#     module_members = inspect.getmembers(module, inspect.isclass)
#     classes_to_remove = [
#         'bullet_text',
#         'centered_timeline',
#         'circular_process_flow',
#         "flowchart",
#         "full_page_iconography",
#         "graph",
#         "graph_with_text",
#     ]
#     for member in module_members:
#         if "services.ppt_generator.data_classes.slide_layout_models" in str(member[1]) and "_data" not in member[0] and member[0] not in classes_to_remove:

#             print(member[0])

#             # NOTE: stale example — SlideResearch.__init__ no longer accepts
#             # a `secondary_data` argument; update before re-enabling.
#             slide_research = SlideResearch(
#                 slide=slide_data.slide,
#                 primary_data=slide_data.primary_research_text,
#                 secondary_data=slide_data.secondary_research_text,
#                 parsing_class= member[1],
#                 prompt_addition=''
#             )

#             output_data = slide_research._format()

#             print(json.dumps(output_data))
