import asyncio
import json
import logging
import os
import sys
from typing import Dict, Any
from urllib.parse import unquote_plus

import boto3

# Import the existing DocUploader code from the Lambda layer.
# Layers are extracted under /opt, so add it to the module search path.
sys.path.append("/opt")  # Lambda layer path
from utils.document_loader.DocUploader import DocUploader
from services.ppt_generator.data_classes.project import Project
from utils.client_check import ClientConfig

# Configure logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def lambda_handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]:
    """
    Lambda handler that processes S3 ObjectCreated events and runs the
    document ingestion pipeline for each uploaded file.

    Args:
        event: The S3 event notification dict from AWS.
        context: The Lambda context object from AWS.

    Returns:
        Dict with an HTTP-style statusCode and a JSON-encoded body.
    """
    logger.info("Received event: %s", json.dumps(event))

    try:
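        # Minimal shape of an S3 notification record as consumed below (only
        # the fields this handler actually reads):
        #   {"eventName": "ObjectCreated:Put",
        #    "s3": {"bucket": {"name": "..."},
        #           "object": {"key": "{client}/{project_id}/filename.ext"}}}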
        # Extract the S3 bucket and key from each record in the event
        for record in event["Records"]:
            if record["eventName"].startswith("ObjectCreated:"):
                bucket_name = record["s3"]["bucket"]["name"]
                # S3 event keys are URL-encoded (e.g. spaces arrive as "+"),
                # so decode them before building paths
                object_key = unquote_plus(record["s3"]["object"]["key"])

                # Skip if not a valid document
                if not object_key.lower().endswith((".pdf", ".docx", ".doc", ".pptx")):
                    logger.info(f"Skipping unsupported file: {object_key}")
                    continue

                # Extract project_id and client from the object key
                # Assuming the key format is: {client}/{project_id}/filename.ext
                path_parts = object_key.split("/")
                if len(path_parts) < 3:
                    logger.warning(f"Invalid path format: {object_key}")
                    continue

                client_name = path_parts[0]
                project_id = path_parts[1]

                # Get project and client config
                project = Project.check_project_in_db(project_id=project_id)
                client_config = ClientConfig(client_name).get_client_config()

                # Build the S3 URI for the uploaded document
                s3_path = f"s3://{bucket_name}/{object_key}"
                logger.info(f"Processing document: {s3_path}")

                # Initialize DocUploader with the S3 path
                doc_uploader = DocUploader(project, client_config, s3_path)

                # Chunk the document, index it in ChromaDB, and generate follow-up questions
                doc_uploader.chunks = doc_uploader.upload_to_chromadb()
                doc_uploader.follow_up_questions()

                # Run the async summarization in a synchronous context
                asyncio.run(doc_uploader.summarize_chunks(doc_uploader.chunks))

                # Upload to DynamoDB
                doc_uploader.upload_to_dynamodb()

                logger.info(f"Successfully processed document: {s3_path}")

        return {
            "statusCode": 200,
            "body": json.dumps("Document processing completed successfully"),
        }
    except Exception as e:
        logger.error(f"Error processing S3 event: {str(e)}", exc_info=True)
        return {
            "statusCode": 500,
            "body": json.dumps(f"Error processing document: {str(e)}"),
        }
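
if __name__ == "__main__":
    # Minimal local smoke test: a sketch only. The bucket and key below are
    # hypothetical placeholders, and it assumes the layer modules imported
    # above are resolvable locally. The key follows the
    # {client}/{project_id}/filename.ext layout the handler expects.
    sample_event = {
        "Records": [
            {
                "eventName": "ObjectCreated:Put",
                "s3": {
                    "bucket": {"name": "example-bucket"},
                    "object": {"key": "example-client/example-project/report.pdf"},
                },
            }
        ]
    }
    print(lambda_handler(sample_event, context=None))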
