import os
import httpx
from fastapi import APIRouter, UploadFile, File, Depends, HTTPException
from fastapi.responses import FileResponse
import shutil
from pathlib import Path
from sqlmodel import Session, select, SQLModel
from typing import List, Dict, Optional
from jose import JWTError, jwt
from datetime import datetime

from database import get_session, settings
from models import Resume, User
from utils import extract_text_from_file
from services.openai_service import parse_resume_content, match_resume_to_jd, bulk_rank_resumes
from routers.auth import get_current_user

# Get the absolute path for uploads directory
BASE_DIR = Path(__file__).resolve().parent.parent
UPLOAD_DIR = BASE_DIR / "uploads"
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)

router = APIRouter(
    prefix="/resumes",
    tags=["resumes"]
)

class SaveParsedRequest(SQLModel):
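    """Manually verified/edited resume data, as returned by /parse-only."""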
    filename: str
    name: Optional[str] = None
    email: Optional[str] = None
    phone: Optional[str] = None
    location: Optional[str] = None
    linkedin_url: Optional[str] = None
    github_url: Optional[str] = None
    skills: Optional[str] = None
    summary: Optional[str] = None
    experience: Optional[str] = None
    projects: Optional[str] = None
    education: Optional[str] = None
    certifications: Optional[str] = None
    languages: Optional[str] = None
    others: Optional[str] = None
    raw_text: str
    file_path: str

@router.post("/parse-only")
async def parse_only(file: UploadFile = File(...), current_user: User = Depends(get_current_user)):
    """Parses a resume and saves the file to disk for previewing."""
    
    # Use timestamp to prevent name collisions
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    safe_name = "".join([c for c in file.filename if c.isalnum() or c in "._-"])
    final_filename = f"{timestamp}_{safe_name}"
    dest_path = UPLOAD_DIR / final_filename
    
    with open(dest_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    
    # Seek back to read text
    file.file.seek(0)
    text_content = await extract_text_from_file(file)
    
    if not text_content or not text_content.strip():
        # Clean up if unreadable
        if dest_path.exists():
            os.remove(dest_path)
        raise HTTPException(status_code=400, detail="Empty or unreadable file content.")
    
    parsed_data = parse_resume_content(text_content)
    # Return the raw text and saved file path so the client can persist them later via /save-parsed
    parsed_data["raw_text"] = text_content
    parsed_data["filename"] = file.filename
    parsed_data["file_path"] = str(dest_path)
    return parsed_data

@router.post("/save-parsed")
async def save_parsed(data: SaveParsedRequest, current_user: User = Depends(get_current_user), session: Session = Depends(get_session)):
    """Saves manually verified/edited resume data to the database."""
    # Check for duplicate email if provided
    if data.email:
        existing = session.exec(select(Resume).where(Resume.email == data.email)).first()
        if existing:
            raise HTTPException(status_code=400, detail=f"Candidate with email {data.email} already exists.")

    resume = Resume(
        filename=data.filename,
        name=data.name,
        email=data.email,
        phone=data.phone,
        location=data.location,
        linkedin_url=data.linkedin_url,
        github_url=data.github_url,
        skills={"content": data.skills},
        summary={"content": data.summary},
        experience={"content": data.experience},
        projects={"content": data.projects},
        education={"content": data.education},
        certifications={"content": data.certifications},
        languages={"content": data.languages},
        others={"content": data.others},
        raw_data={"text": data.raw_text},
        file_path=data.file_path
    )
    session.add(resume)
    session.commit()
    session.refresh(resume)
    return resume

class UploadResponse(SQLModel):
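    """Result of a bulk upload: saved resumes plus skipped files with reasons."""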
    success_count: int
    resumes: List[Resume]
    skipped: List[Dict[str, str]]

class JDMatchRequest(SQLModel):
    jd_text: str

async def enrich_github_data(github_url: str) -> dict:
    """Simple GitHub scraper to fetch public profile data."""
    if not github_url or "github.com" not in github_url:
        return {}
    try:
        username = github_url.rstrip("/").split("/")[-1]
        async with httpx.AsyncClient() as client:
            response = await client.get(f"https://api.github.com/users/{username}")
            if response.status_code == 200:
                data = response.json()
                return {
                    "public_repos": data.get("public_repos"),
                    "followers": data.get("followers"),
                    "bio": data.get("bio")
                }
    except Exception as e:
        print(f"Error enriching GitHub data: {e}")
    return {}

@router.post("/upload", response_model=UploadResponse)
async def upload_resumes(
    files: List[UploadFile] = File(...),
    current_user: User = Depends(get_current_user),
    session: Session = Depends(get_session)
):
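    """Bulk-upload resumes: save files to disk, parse content, skip duplicates and invalid files, and persist records."""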
    uploaded_resumes = []
    skipped_files = []
    
    allowed_extensions = {".pdf", ".docx", ".txt"}
    
    for file in files:
        filename = file.filename
        ext = os.path.splitext(filename)[1].lower()
        
        if ext not in allowed_extensions:
            skipped_files.append({"filename": filename, "reason": "Unsupported file format"})
            continue

        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            safe_name = "".join([c for c in file.filename if c.isalnum() or c in "._-"])
            final_filename = f"{timestamp}_{safe_name}"
            dest_path = UPLOAD_DIR / final_filename
            
            with open(dest_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)
            
            file.file.seek(0)
            text_content = await extract_text_from_file(file)
            
            if not text_content or not text_content.strip():
                # Remove the saved copy so rejected uploads don't leave orphaned files on disk
                if dest_path.exists():
                    os.remove(dest_path)
                skipped_files.append({"filename": filename, "reason": "Empty or unreadable content"})
                continue

            parsed_data = parse_resume_content(text_content)

            if not parsed_data.get("is_resume", True):
                if dest_path.exists():
                    os.remove(dest_path)
                skipped_files.append({"filename": filename, "reason": "Not a valid resume"})
                continue

            email = parsed_data.get("email")
            if email:
                existing = session.exec(select(Resume).where(Resume.email == email)).first()
                if existing:
                    if dest_path.exists():
                        os.remove(dest_path)
                    skipped_files.append({"filename": filename, "reason": f"Email {email} already exists"})
                    continue
            
            github_url = parsed_data.get("github_url")
            github_extra = await enrich_github_data(github_url) if github_url else {}

            resume = Resume(
                filename=filename,
                name=parsed_data.get("name"),
                email=email,
                phone=parsed_data.get("phone"),
                location=parsed_data.get("location"),
                linkedin_url=parsed_data.get("linkedin_url"),
                github_url=github_url,
                summary={"content": parsed_data.get("summary"), "github_stats": github_extra},
                experience={"content": parsed_data.get("experience")},
                projects={"content": parsed_data.get("projects")}, 
                education={"content": parsed_data.get("education")},
                skills={"content": parsed_data.get("skills")},
                certifications={"content": parsed_data.get("certifications")},
                languages={"content": parsed_data.get("languages")},
                others={"content": parsed_data.get("others")},
                raw_data={"text": text_content},
                file_path=str(dest_path)
            )
            
            session.add(resume)
            uploaded_resumes.append(resume)
        except Exception as e:
            skipped_files.append({"filename": filename, "reason": f"System error: {str(e)}"})
            continue
    
    if uploaded_resumes:
        session.commit()
        for r in uploaded_resumes:
            session.refresh(r)
    
    return UploadResponse(
        success_count=len(uploaded_resumes),
        resumes=uploaded_resumes,
        skipped=skipped_files
    )

@router.get("/search")
async def search_resumes(q: str, current_user: User = Depends(get_current_user), session: Session = Depends(get_session)):
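    """Rank all stored resumes against a free-text query using the bulk LLM ranker."""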
    resumes = session.exec(select(Resume)).all()
    if not q:
        return [{"resume": r, "score": 100, "reason": "No query"} for r in resumes]
    
    # Prepare a brief profile blob for each candidate
    resumes_brief = []
    for r in resumes:
        # Coerce missing sections to "" so formatting and slicing never hit None
        skills = (r.skills.get("content") if r.skills else "") or ""
        summary = (r.summary.get("content") if r.summary else "") or ""
        exp = (r.experience.get("content") if r.experience else "") or ""
        # Truncate experience to keep the prompt small; the rest fits within GPT-4o-mini's context window
        full_text = f"Name: {r.name}\nSkills: {skills}\nSummary: {summary}\nExperience: {exp[:500]}..."
        resumes_brief.append({"id": r.id, "text": full_text})

    # Use bulk ranking, treating the search query as a mini-JD
    matches = bulk_rank_resumes(q, resumes_brief)
    
    ranked_results = []
    for match in matches:
        match_id = str(match.get("id"))
        resume_obj = next((r for r in resumes if str(r.id) == match_id), None)
        if resume_obj:
            ranked_results.append({
                "resume": resume_obj,
                "score": match.get("score", 0),
                "reason": match.get("pitch", "") # Using pitch as reason
            })
    return sorted(ranked_results, key=lambda x: x["score"], reverse=True)

@router.post("/match")
async def match_jd(request: JDMatchRequest, current_user: User = Depends(get_current_user), session: Session = Depends(get_session)):
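    """Match every stored resume against a job description and return scored rankings."""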
    resumes = session.exec(select(Resume)).all()
    if not resumes:
        return []

    # Prepare the batch of candidate profiles
    candidates = []
    for r in resumes:
        # Build the LLM context from the parsed sections, coercing missing ones to ""
        skills = (r.skills.get("content") if r.skills else "") or ""
        summary = (r.summary.get("content") if r.summary else "") or ""
        exp = (r.experience.get("content") if r.experience else "") or ""
        projects = (r.projects.get("content") if r.projects else "") or ""

        context = f"Candidate ID: {r.id}\nName: {r.name}\nSummary: {summary}\nSkills: {skills}\nExperience: {exp}\nProjects: {projects}"
        candidates.append({"id": r.id, "profile": context})

    # Execute bulk analysis
    rankings = bulk_rank_resumes(request.jd_text, candidates)

    results = []
    for rank in rankings:
        # Compare ids as strings: the LLM may echo them back as strings
        r_id = str(rank.get("id"))
        resume_obj = next((x for x in resumes if str(x.id) == r_id), None)
        if resume_obj:
            results.append({
                "resume_id": resume_obj.id,
                "name": resume_obj.name,
                "score": rank.get("score", 0),
                "pitch": rank.get("pitch", "No analysis provided."),
                "missing_skills": rank.get("missing_skills", [])
            })
            
    # Drop completely irrelevant candidates (score 0)
    results = [res for res in results if res["score"] > 0]
            
    return sorted(results, key=lambda x: x["score"], reverse=True)

@router.get("/", response_model=List[Resume])
async def list_resumes(current_user: User = Depends(get_current_user), session: Session = Depends(get_session)):
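    """Return all stored resumes."""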
    return session.exec(select(Resume)).all()

@router.get("/{resume_id}", response_model=Resume)
async def get_resume(resume_id: int, current_user: User = Depends(get_current_user), session: Session = Depends(get_session)):
    """Get a single resume by ID."""
    resume = session.get(Resume, resume_id)
    if not resume:
        raise HTTPException(status_code=404, detail="Resume not found")
    return resume

@router.get("/file/{resume_id}")
async def get_resume_file(resume_id: int, token: Optional[str] = None, session: Session = Depends(get_session)):
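    """Serve the original resume file for inline preview; the token is passed as a query param for iframe use."""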
    # Manual token verification for iframes (query param)
    if token:
        try:
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
            username: str = payload.get("sub")
            if not username:
                raise HTTPException(status_code=401, detail="Invalid token")
        except JWTError:
            raise HTTPException(status_code=401, detail="Invalid or expired token")
    else:
        raise HTTPException(status_code=401, detail="Authentication token required")

    resume = session.get(Resume, resume_id)
    if not resume:
        raise HTTPException(status_code=404, detail="Resume record not found")
    
    if not resume.file_path or not os.path.exists(resume.file_path):
        # Fallback: older records may have been stored under the original filename
        # rather than an absolute path, so check the uploads directory directly
        fallback_path = UPLOAD_DIR / resume.filename
        if fallback_path.exists():
            return FileResponse(fallback_path, filename=resume.filename, content_disposition_type="inline")
            
        raise HTTPException(status_code=404, detail="Original resume file not found. It may have been uploaded before persistent storage was enabled.")
    
    return FileResponse(resume.file_path, filename=resume.filename, content_disposition_type="inline")
