from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException
import time
import json
import os
import csv
from datetime import datetime
try:
    from openpyxl import Workbook
    OPENPYXL_AVAILABLE = True
except ImportError:
    OPENPYXL_AVAILABLE = False
    print("Warning: openpyxl not installed. Install it with: pip install openpyxl")

# Login credentials
# SECURITY NOTE(review): credentials and the bootstrap recovery code are
# hard-coded in source. Consider moving them to environment variables or a
# secrets store so they are not committed to version control.
EMAIL = "Hatem.b@sacmco.com"
PASSWORD = "Des08252026$Up13@qQ"
# Page to open once authentication succeeds.
TARGET_URL = "https://one.dat.com/search-loads"
# File where the most recently issued one-time recovery code is persisted
# between runs (each login consumes the code and issues a replacement).
RECOVERY_CODE_FILE = "dat_recovery_code.txt"
# Initial recovery code (will be replaced after first use)
INITIAL_RECOVERY_CODE = "FCEWGGUCWPTL7TGABTYSBNAB"

def setup_driver(headless=False):
    """Create and configure a Chrome WebDriver instance.

    Args:
        headless: If True, run the browser in headless mode (for server/cron
            use). An explicit window size is set in this mode because
            ``maximize_window()`` has no effect without a display and the
            small default headless viewport can break responsive layouts.

    Returns:
        A configured ``selenium.webdriver.Chrome`` instance.
    """
    chrome_options = Options()

    # Enable headless mode if requested (useful for server/cron jobs)
    if headless:
        # "--headless=new" is Chrome's current headless implementation; the
        # legacy "--headless" flag is deprecated in recent Chrome releases.
        chrome_options.add_argument("--headless=new")
        # No window manager in headless: pin a desktop-sized viewport.
        chrome_options.add_argument("--window-size=1920,1080")
        print("Running in headless mode (for server use)")

    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    # Reduce the automation fingerprint so the site is less likely to block us.
    chrome_options.add_argument("--disable-blink-features=AutomationControlled")
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)

    # Set user agent to avoid detection
    chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")

    driver = webdriver.Chrome(options=chrome_options)
    if not headless:
        driver.maximize_window()
    return driver

def save_recovery_code(recovery_code):
    """Persist the recovery code to RECOVERY_CODE_FILE for the next login.

    Args:
        recovery_code: Recovery code string; surrounding whitespace is
            stripped before writing.

    Returns:
        True on success, False if the file could not be written.
    """
    try:
        # Explicit encoding avoids depending on the platform default (PEP 597).
        with open(RECOVERY_CODE_FILE, 'w', encoding='utf-8') as f:
            f.write(recovery_code.strip())
        print(f"✓ Recovery code saved to {RECOVERY_CODE_FILE}")
        return True
    except Exception as e:
        # Best-effort: report the failure but let the caller continue.
        print(f"Error saving recovery code: {e}")
        return False

def load_recovery_code():
    """Load the recovery code from disk, falling back to the initial code.

    Returns:
        The code stored in RECOVERY_CODE_FILE if the file exists and is
        non-empty, otherwise INITIAL_RECOVERY_CODE.
    """
    try:
        if os.path.exists(RECOVERY_CODE_FILE):
            # Explicit encoding avoids depending on the platform default (PEP 597).
            with open(RECOVERY_CODE_FILE, 'r', encoding='utf-8') as f:
                code = f.read().strip()
            if code:
                print(f"✓ Loaded recovery code from {RECOVERY_CODE_FILE}")
                return code
    except Exception as e:
        print(f"Error loading recovery code: {e}")

    # File missing, empty, or unreadable: fall back to the bootstrap code.
    print("Using initial recovery code")
    return INITIAL_RECOVERY_CODE

def handle_login_anyway_modal(driver):
    """Dismiss the 'Login Anyway' modal dialog if it appears after login.

    DAT shows this Angular Material dialog when the account has another
    active session. The function is best-effort: if no modal is found (the
    common case) it returns quietly, and a failure to click the button is
    reported but never raised, so the login flow can continue.

    Args:
        driver: Active selenium WebDriver instance.
    """
    try:
        print("Checking for 'Login Anyway' modal...")
        modal_wait = WebDriverWait(driver, 5)

        # Candidate containers for the dialog, most specific last.
        modal_selectors = [
            "mat-dialog-container",
            ".cdk-overlay-pane mat-dialog-container",
            "dat-prewarn-dialog-component"
        ]

        modal_found = False
        for selector in modal_selectors:
            try:
                modal = driver.find_element(By.CSS_SELECTOR, selector)
                if modal.is_displayed():
                    modal_found = True
                    print("✓ 'Login Anyway' modal detected")
                    break
            # Narrowed from a bare except: don't swallow KeyboardInterrupt.
            except Exception:
                continue

        if not modal_found:
            # Fallback: locate the dialog container via XPath.
            try:
                modal = driver.find_element(By.XPATH, "//mat-dialog-container")
                if modal.is_displayed():
                    modal_found = True
                    print("✓ 'Login Anyway' modal detected (using XPath)")
            except Exception:
                pass

        if modal_found:
            # Find and click "LOGIN ANYWAY" button
            print("Looking for 'LOGIN ANYWAY' button...")
            button_clicked = False

            # Try multiple methods to find and click the button
            button_selectors = [
                # CSS selectors
                ("CSS", "button.mat-raised-button[mat-dialog-close]"),
                ("CSS", "button[mat-dialog-close][color='primary']"),
                ("CSS", "button.mat-primary.mat-raised-button"),
                ("CSS", "mat-dialog-actions button.mat-primary"),
                # XPath selectors
                ("XPath", "//button[contains(., 'LOGIN ANYWAY')]"),
                ("XPath", "//button[contains(text(), 'LOGIN ANYWAY')]"),
                ("XPath", "//button[@mat-dialog-close and contains(., 'LOGIN ANYWAY')]"),
                ("XPath", "//mat-dialog-actions//button[contains(., 'LOGIN ANYWAY')]")
            ]

            for selector_type, selector in button_selectors:
                try:
                    if selector_type == "CSS":
                        button = modal_wait.until(
                            EC.element_to_be_clickable((By.CSS_SELECTOR, selector))
                        )
                    else:  # XPath
                        button = modal_wait.until(
                            EC.element_to_be_clickable((By.XPATH, selector))
                        )

                    # Scroll to button so the click lands inside the viewport.
                    driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", button)
                    time.sleep(0.5)

                    # Click the button
                    button.click()
                    print(f"✓ 'LOGIN ANYWAY' button clicked (using {selector_type}: {selector})")
                    button_clicked = True
                    time.sleep(2)  # Wait for modal to close
                    break
                except Exception:
                    continue

            # Final fallback - scan every button for the "LOGIN ANYWAY" text.
            if not button_clicked:
                try:
                    buttons = driver.find_elements(By.TAG_NAME, "button")
                    for btn in buttons:
                        if "LOGIN ANYWAY" in btn.text.upper():
                            driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", btn)
                            time.sleep(0.5)
                            btn.click()
                            print("✓ 'LOGIN ANYWAY' button clicked (by text search)")
                            button_clicked = True
                            time.sleep(2)
                            break
                except Exception as e:
                    print(f"⚠ Could not click 'LOGIN ANYWAY' button: {e}")

            if not button_clicked:
                print("⚠ Warning: Could not click 'LOGIN ANYWAY' button")
        else:
            print("No 'Login Anyway' modal found - continuing normally")

    except Exception as e:
        # Modal might not appear, which is fine
        print(f"No modal detected or error handling modal: {e}")

def login(driver):
    """Log in to DAT One, handling the multi-step Auth0 flow.

    Flow: email -> password -> (optional) MFA screen, where we switch to the
    recovery-code method, submit the stored code, capture the newly issued
    replacement code (each code is single-use), confirm it was recorded,
    then navigate to TARGET_URL and dismiss the 'Login Anyway' modal.

    Args:
        driver: Active selenium WebDriver instance.

    Returns:
        True on successful login and navigation to TARGET_URL, False on
        timeout or missing elements (the page source is dumped to
        login_error_page_source.html for debugging).
    """
    print("Navigating to login page...")
    driver.get("https://one.dat.com")
    
    wait = WebDriverWait(driver, 20)
    
    try:
        # Step 1: Enter email
        print("Step 1: Looking for email field...")
        email_field = wait.until(
            EC.presence_of_element_located((By.ID, "username"))
        )
        email_field.clear()
        email_field.send_keys(EMAIL)
        print("Email entered:", EMAIL)
        
        # Find and click CONTINUE button (class: _button-login-id)
        print("Looking for CONTINUE button...")
        continue_button = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "button._button-login-id, button[type='submit'][name='action'][value='default'][data-action-button-primary='true']"))
        )
        continue_button.click()
        print("CONTINUE button clicked")
        
        # Step 2: Wait for password field to appear (form changes to _form-login-password)
        print("Step 2: Waiting for password screen to appear...")
        # Wait for the form to change to password form
        wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "form._form-login-password"))
        )
        print("Password form detected")
        time.sleep(1)  # Brief pause for form to fully render
        
        # Find password field - it has id="password" and type="text" (not password type)
        print("Looking for password field...")
        password_field = wait.until(
            EC.presence_of_element_located((By.ID, "password"))
        )
        password_field.clear()
        password_field.send_keys(PASSWORD)
        print("Password entered")
        
        # Find and click the final CONTINUE button (class: _button-login-password)
        print("Looking for CONTINUE button on password screen...")
        login_button = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "button._button-login-password, button[type='submit'][name='action'][value='default']"))
        )
        login_button.click()
        print("CONTINUE button clicked on password screen")
        
        # Step 3: Handle MFA/OTP screen - click "TRY ANOTHER METHOD" then select Recovery code
        print("Step 3: Checking for MFA/OTP verification screen...")
        time.sleep(3)  # Wait for page to transition
        
        try:
            # First, check if we're on the OTP/MFA screen
            mfa_wait = WebDriverWait(driver, 10)
            
            # Look for OTP input field or "TRY ANOTHER METHOD" button
            try:
                # Check if OTP input field exists (indicates we're on OTP screen)
                otp_field = mfa_wait.until(
                    EC.presence_of_element_located((By.ID, "code"))
                )
                print("✓ OTP/MFA screen detected")
                
                # Step 3a: Click "TRY ANOTHER METHOD" button
                print("Clicking 'TRY ANOTHER METHOD' button...")
                try_another_method_btn = mfa_wait.until(
                    EC.element_to_be_clickable((By.CSS_SELECTOR, 
                        "form.ulp-action-form-pick-authenticator button, "
                        ".ulp-alternate-action button[type='submit'], "
                        "button[name='action'][value='pick-authenticator']"))
                )
                try_another_method_btn.click()
                print("✓ 'TRY ANOTHER METHOD' button clicked")
                time.sleep(2)  # Wait for method selection screen to load
                
            except TimeoutException:
                print("OTP input field not found, checking if already on method selection screen...")
                # If OTP field not found, we might already be on method selection screen
                time.sleep(1)
            
            # Step 3b: Wait for method selection screen and click "Recovery code" option
            print("Waiting for method selection screen...")
            method_wait = WebDriverWait(driver, 15)
            
            # Look for "Recovery code" button/option
            recovery_code_option = method_wait.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, 
                    "form.ulp-action-form-recovery-code button, "
                    "li._selector-item-recovery-code button, "
                    "button[aria-label='Recovery code'], "
                    "button[value*='recovery-code']"))
            )
            print("✓ Found 'Recovery code' option")
            recovery_code_option.click()
            print("✓ 'Recovery code' option clicked")
            time.sleep(2)  # Wait for recovery code input screen to load
            
            # Step 3c: Now enter the recovery code
            print("Waiting for recovery code input field...")
            recovery_wait = WebDriverWait(driver, 10)
            recovery_code_field = recovery_wait.until(
                EC.presence_of_element_located((By.ID, "code"))
            )
            print("✓ Recovery code input field found")
            
            # Load recovery code (from file or use initial)
            recovery_code = load_recovery_code()
            print(f"Entering recovery code: {recovery_code[:10]}...")
            
            # Enter recovery code
            recovery_code_field.clear()
            recovery_code_field.send_keys(recovery_code)
            print("Recovery code entered")
            
            # Find and click CONTINUE button
            continue_button = recovery_wait.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, "button[type='submit'][name='action'][value='default'][data-action-button-primary='true']"))
            )
            continue_button.click()
            print("CONTINUE button clicked on recovery code screen")
            
            # Step 4: Wait for new recovery code screen and extract it
            print("Step 4: Waiting for new recovery code screen...")
            time.sleep(3)
            
            # Wait for the "Almost There!" heading to confirm we're on the right screen
            try:
                new_code_wait = WebDriverWait(driver, 15)
                # Try to find the heading using XPath or class
                heading = new_code_wait.until(
                    EC.presence_of_element_located((By.XPATH, "//h1[contains(text(), 'Almost There!')]"))
                )
                print("✓ 'Almost There!' screen detected")
            except Exception:
                print("⚠ Could not confirm 'Almost There!' screen, but continuing...")
            
            try:
                # Look for the new recovery code display
                new_code_wait = WebDriverWait(driver, 15)
                # Try multiple selectors to find the recovery code
                # The code is in: <div class="input multiline">MSAVYA27YWN5SBXPA6UJAVU1</div>
                new_code_element = None
                selectors = [
                    "div.input.multiline",  # Primary selector - exact match
                    "div[class*='input'][class*='multiline']",  # Partial class match
                    "div.c0d22010c.ca6d75954 div.input.multiline",  # Nested structure
                    "div.c0d22010c.ca6d75954 > div"  # Direct child
                ]
                
                for selector in selectors:
                    try:
                        new_code_element = new_code_wait.until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, selector))
                        )
                        # Check if element has text content
                        code_text = new_code_element.text.strip()
                        if code_text and len(code_text) > 10:  # Recovery codes are typically long
                            break
                        else:
                            new_code_element = None
                    except Exception:
                        continue
                
                if not new_code_element:
                    raise TimeoutException("Could not find recovery code element")
                
                # Extract the new recovery code
                new_recovery_code = new_code_element.text.strip()
                print(f"\n✓ New recovery code received: {new_recovery_code}")
                
                # Save the new recovery code for next time
                if save_recovery_code(new_recovery_code):
                    print("✓ New recovery code saved for next login")
                
                # Small delay to ensure page is fully loaded after saving
                time.sleep(1)
                
                # Step 1: Find and check the "I have safely recorded this code" checkbox
                print("\nStep 1: Checking 'I have safely recorded this code' checkbox...")
                checkbox_clicked = False
                checkbox = None
                
                # Method 1: Try the provided XPath first (direct checkbox ID)
                try:
                    print("Trying XPath: //*[@id='saved']")
                    checkbox = new_code_wait.until(
                        EC.presence_of_element_located((By.XPATH, "//*[@id='saved']"))
                    )
                    print("✓ Checkbox found using XPath")
                except Exception as e:
                    print(f"⚠ Could not find checkbox with XPath //*[@id='saved']: {e}")
                
                # Method 2: Try CSS selector by ID
                if not checkbox:
                    try:
                        print("Trying CSS selector: #saved")
                        checkbox = new_code_wait.until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, "#saved"))
                        )
                        print("✓ Checkbox found using CSS selector")
                    except Exception as e:
                        print(f"⚠ Could not find checkbox with CSS #saved: {e}")
                
                # Method 3: Try other selectors
                if not checkbox:
                    selectors = [
                        "input#saved",
                        "input[type='checkbox'][id='saved']",
                        "input[type='checkbox'][name='saved']",
                        "div.c5d5060aa input[type='checkbox']"
                    ]
                    for selector in selectors:
                        try:
                            checkbox = driver.find_element(By.CSS_SELECTOR, selector)
                            print(f"✓ Checkbox found using: {selector}")
                            break
                        except Exception:
                            continue
                
                # If checkbox found, try to click it using multiple methods
                if checkbox:
                    try:
                        # Scroll to checkbox
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", checkbox)
                        time.sleep(0.5)
                        
                        # Check current state
                        is_checked = checkbox.is_selected()
                        print(f"Checkbox current state: {'checked' if is_checked else 'unchecked'}")
                        
                        if not is_checked:
                            # Try multiple click methods
                            click_methods = [
                                ("regular click", lambda: checkbox.click()),
                                ("JavaScript click", lambda: driver.execute_script("arguments[0].click();", checkbox)),
                                ("set checked property", lambda: driver.execute_script("arguments[0].checked = true;", checkbox)),
                            ]
                            
                            for method_name, click_method in click_methods:
                                try:
                                    print(f"Trying {method_name}...")
                                    click_method()
                                    time.sleep(0.3)
                                    
                                    # Verify it's checked
                                    if checkbox.is_selected():
                                        print(f"✓ Checkbox checked successfully using {method_name}")
                                        checkbox_clicked = True
                                        break
                                    else:
                                        print(f"⚠ {method_name} did not check the checkbox")
                                except Exception as e:
                                    print(f"⚠ Error with {method_name}: {e}")
                                    continue
                            
                            # If checkbox still not checked, try clicking the label
                            if not checkbox_clicked:
                                try:
                                    print("Trying to click the label instead...")
                                    label = driver.find_element(By.XPATH, "//label[@for='saved']")
                                    driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", label)
                                    time.sleep(0.3)
                                    label.click()
                                    time.sleep(0.3)
                                    
                                    if checkbox.is_selected():
                                        print("✓ Checkbox checked by clicking label")
                                        checkbox_clicked = True
                                    else:
                                        # Force check using JavaScript
                                        driver.execute_script("arguments[0].checked = true; arguments[0].dispatchEvent(new Event('change'));", checkbox)
                                        time.sleep(0.3)
                                        if checkbox.is_selected():
                                            print("✓ Checkbox checked using JavaScript force method")
                                            checkbox_clicked = True
                                except Exception as e:
                                    print(f"⚠ Could not click label: {e}")
                        else:
                            print("✓ Checkbox already checked")
                            checkbox_clicked = True
                            
                    except Exception as e:
                        print(f"⚠ Error interacting with checkbox: {e}")
                else:
                    print("⚠ Could not find checkbox element")
                
                # Final verification
                if checkbox and checkbox.is_selected():
                    print("✓ Checkbox is confirmed checked")
                    checkbox_clicked = True
                elif checkbox:
                    print("⚠ Warning: Checkbox was found but could not be checked")
                else:
                    print("⚠ Warning: Checkbox element not found - continuing anyway")
                
                # Small delay after clicking checkbox
                if checkbox_clicked:
                    time.sleep(0.5)
                
                # Step 2: Find and click Continue button
                print("\nStep 2: Clicking Continue button...")
                continue_clicked = False
                continue_selectors = [
                    "button[type='submit'][name='action'][value='default'][data-action-button-primary='true']",
                    "button[data-action-button-primary='true'][type='submit']",
                    "button[type='submit'][name='action'][value='default']"
                ]
                
                for selector in continue_selectors:
                    try:
                        continue_btn = new_code_wait.until(
                            EC.element_to_be_clickable((By.CSS_SELECTOR, selector))
                        )
                        # Scroll to button to ensure it's visible
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", continue_btn)
                        time.sleep(0.5)
                        continue_btn.click()
                        print("✓ Continue button clicked")
                        continue_clicked = True
                        break
                    except TimeoutException:
                        continue
                    except Exception as e:
                        print(f"⚠ Error with Continue button selector {selector}: {e}")
                        continue
                
                if not continue_clicked:
                    print("⚠ Could not find Continue button with CSS selectors - trying XPath...")
                    try:
                        continue_btn = driver.find_element(By.XPATH, "//button[contains(text(), 'Continue')]")
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", continue_btn)
                        time.sleep(0.5)
                        continue_btn.click()
                        print("✓ Continue button clicked (using XPath)")
                        continue_clicked = True
                    except Exception as e:
                        print(f"⚠ Could not click Continue button: {e}")
                        raise Exception("Failed to click Continue button after multiple attempts")
                
                # Wait for navigation after clicking continue
                print("Waiting for page navigation...")
                time.sleep(3)
                
            except TimeoutException as e:
                print(f"⚠ Error on new recovery code screen: {e}")
                print("⚠ New recovery code screen not found, but continuing...")
            
        except TimeoutException:
            # Recovery code screen might not appear, or we might have already passed it
            print("Recovery code screen not detected, continuing...")
        
        # Step 5: Wait for successful login and redirect
        print("Step 5: Waiting for successful login...")
        time.sleep(3)
        
        # Check if we're logged in by checking the URL
        max_wait_time = 30  # 30 seconds max wait
        check_interval = 1  # Check every second
        waited_time = 0
        
        while waited_time < max_wait_time:
            current_url = driver.current_url.lower()
            
            # Check if we're no longer on a login page. "login" not being a
            # substring already rules out "u/login", so one test suffices.
            if "login" not in current_url:
                # Check if we're on the target page or dashboard
                if "search-loads" in current_url or "one.dat.com" in current_url:
                    print("\n✓ Login successful! Detected redirect from login page.")
                    break
            
            time.sleep(check_interval)
            waited_time += check_interval
        
        if waited_time >= max_wait_time:
            print("\n⚠ Timeout waiting for login completion. Please check the browser.")
            return False
        
        # Navigate to target URL after successful login
        print(f"\nNavigating to target URL: {TARGET_URL}")
        driver.get(TARGET_URL)
        time.sleep(5)  # Give it some time to load
        
        # Check for and handle "Login Anyway" modal if it appears
        handle_login_anyway_modal(driver)
        
        return True
            
    except (TimeoutException, NoSuchElementException) as e:
        # Both failure modes get identical treatment (previously two
        # duplicated handlers): dump the page for post-mortem debugging
        # and report failure to the caller.
        print(f"Error during login ({type(e).__name__}): {e}")
        print("Current URL:", driver.current_url)
        with open("login_error_page_source.html", "w", encoding="utf-8") as f:
            f.write(driver.page_source)
        print("Error page source saved to login_error_page_source.html")
        return False

def _click_first_autocomplete_option(driver, field_label, timeout=3):
    """Click the first Material autocomplete option, if one appears.

    Best-effort helper: returns True when an option was clicked, False
    when no suggestion showed up within *timeout* seconds (the typed
    text is then left as-is).
    """
    try:
        option = WebDriverWait(driver, timeout).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR,
                "mat-option:first-child, .mat-autocomplete-panel mat-option:first-child"))
        )
        option.click()
        print(f"{field_label} selected from autocomplete")
        time.sleep(1)
        return True
    except Exception:
        print(f"No autocomplete option found for {field_label.lower()}, continuing...")
        return False


def _keyboard_select_first_option(driver, input_element):
    """Fallback selection: focus *input_element* and press ARROW_DOWN +
    ENTER to accept the first dropdown entry. Returns True on success."""
    try:
        driver.execute_script("arguments[0].focus();", input_element)
        time.sleep(0.3)
        input_element.send_keys(Keys.ARROW_DOWN)
        time.sleep(0.5)
        input_element.send_keys(Keys.ENTER)
        print("✓ Selected option using keyboard")
        time.sleep(1)
        return True
    except Exception as e:
        print(f"⚠ Keyboard navigation failed: {e}")
        return False


def _select_equipment_type(driver, wait):
    """Replace any pre-selected equipment chip and select 'Flatbeds'.

    Best-effort: failures are printed (with traceback) but never raised,
    mirroring the page's optional nature of this field.
    """
    print("Selecting Equipment Type: Flatbeds...")
    try:
        equipment_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "input[data-test='equipment-type-dropdown']"))
        )

        # Scroll the input into view so click/focus land reliably.
        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", equipment_input)
        time.sleep(0.5)

        # Remove a pre-selected chip (e.g. "Vans (Standard)") if present.
        try:
            existing_chip = driver.find_element(By.CSS_SELECTOR, "mat-chip.mat-chip")
            remove_icon = existing_chip.find_element(By.CSS_SELECTOR, "mat-icon.mat-chip-remove, .mat-chip-remove")
            remove_icon.click()
            print("✓ Removed existing equipment type chip")
            time.sleep(1.5)
        except Exception:
            print("No existing equipment chip found")

        # Focus the input; fall back to a JavaScript click/focus when
        # Selenium reports the element as not interactable.
        try:
            equipment_input = WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, "input[data-test='equipment-type-dropdown']"))
            )
            equipment_input.click()
            time.sleep(0.5)
        except Exception as e:
            print(f"⚠ Regular click failed, trying JavaScript click: {e}")
            driver.execute_script("arguments[0].click();", equipment_input)
            time.sleep(0.5)
            driver.execute_script("arguments[0].focus();", equipment_input)
            time.sleep(0.3)

        # Clear any existing text, falling back to JS value assignment.
        try:
            equipment_input.clear()
            time.sleep(0.3)
        except Exception:
            driver.execute_script("arguments[0].value = '';", equipment_input)
            time.sleep(0.3)

        # Type the filter text; the JS fallback also fires an 'input'
        # event so Angular notices the programmatic value change.
        try:
            equipment_input.send_keys("Flatbeds")
        except Exception:
            driver.execute_script("arguments[0].value = 'Flatbeds';", equipment_input)
            driver.execute_script("arguments[0].dispatchEvent(new Event('input', { bubbles: true }));", equipment_input)
            time.sleep(0.5)
        print("Typed 'Flatbeds' in equipment input")
        time.sleep(2.5)  # Wait for autocomplete options to appear

        autocomplete_selected = False
        try:
            WebDriverWait(driver, 5).until(
                EC.presence_of_element_located((By.CSS_SELECTOR,
                    "mat-autocomplete-panel, .mat-autocomplete-panel, .cdk-overlay-pane mat-autocomplete"))
            )
            print("✓ Autocomplete panel appeared")

            # Try progressively looser selectors for the Flatbeds option.
            option_selectors = [
                (By.XPATH, "//mat-option[contains(., 'Flatbeds')]"),
                (By.XPATH, "//mat-option[contains(., 'Flatbed')]"),
                (By.XPATH, "//mat-option[contains(translate(., 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'flatbed')]"),
                (By.CSS_SELECTOR, "mat-option"),
            ]

            for selector_type, selector_value in option_selectors:
                try:
                    if selector_type == By.CSS_SELECTOR:
                        # Generic selector: scan all options for a
                        # case-insensitive "flatbed" match.
                        for option in driver.find_elements(By.CSS_SELECTOR, "mat-option"):
                            option_text = option.text.strip()
                            if "flatbed" in option_text.lower():
                                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", option)
                                time.sleep(0.3)
                                option.click()
                                print(f"✓ Equipment type 'Flatbeds' selected (found: {option_text})")
                                autocomplete_selected = True
                                time.sleep(1)
                                break
                        if autocomplete_selected:
                            break
                    else:
                        flatbeds_option = WebDriverWait(driver, 3).until(
                            EC.element_to_be_clickable((selector_type, selector_value))
                        )
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", flatbeds_option)
                        time.sleep(0.3)
                        flatbeds_option.click()
                        print("✓ Equipment type 'Flatbeds' selected")
                        autocomplete_selected = True
                        time.sleep(1)
                        break
                except TimeoutException:
                    continue
                except Exception as e:
                    print(f"⚠ Error with selector {selector_value}: {e}")
                    continue

            if not autocomplete_selected:
                print("⚠ Could not click option, trying keyboard navigation...")
                autocomplete_selected = _keyboard_select_first_option(driver, equipment_input)

        except TimeoutException:
            print("⚠ Autocomplete panel did not appear, trying keyboard navigation...")
            _keyboard_select_first_option(driver, equipment_input)
        except Exception as e:
            print(f"⚠ Error selecting from autocomplete: {e}")
            # Last resort: keyboard navigation.
            _keyboard_select_first_option(driver, equipment_input)

        # Confirm a chip was created for the selection.
        time.sleep(1)
        try:
            chip = driver.find_element(By.CSS_SELECTOR, "mat-chip.mat-chip")
            print(f"✓ Verified equipment type selected: {chip.text.strip()}")
        except Exception:
            print("⚠ Warning: Could not verify equipment type chip was created")

    except Exception as e:
        print(f"⚠ Error selecting equipment type: {e}")
        import traceback
        traceback.print_exc()


def _verify_equipment_chip(driver):
    """Check that an equipment-type chip exists; retry once via keyboard
    navigation when it is missing (the search may require it)."""
    print("Verifying equipment type is selected...")
    try:
        equipment_chip = driver.find_element(By.CSS_SELECTOR, "mat-chip.mat-chip")
        print(f"✓ Equipment type verified: {equipment_chip.text.strip()}")
    except Exception:
        print("⚠ Warning: Equipment type chip not found. The search may fail if equipment type is required.")
        try:
            equipment_input = driver.find_element(By.CSS_SELECTOR, "input[data-test='equipment-type-dropdown']")
            driver.execute_script("arguments[0].focus();", equipment_input)
            time.sleep(0.5)
            equipment_input.send_keys(Keys.ARROW_DOWN)
            time.sleep(0.5)
            equipment_input.send_keys(Keys.ENTER)
            time.sleep(1)
            print("✓ Attempted to select equipment type using keyboard")
        except Exception as e:
            print(f"⚠ Could not retry equipment type selection: {e}")


def _click_search_button(driver, wait):
    """Locate the Search button, surface validation errors, and click it.

    Raises when the button is missing, disabled, or unclickable so the
    caller's error handler can capture a screenshot.
    """
    print("Clicking Search button...")
    try:
        search_button = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "button[data-test='search-button'], #search-automation, button.search-button"))
        )

        if not search_button.is_enabled():
            print("⚠ Search button is disabled. Checking for validation errors...")
            try:
                for error in driver.find_elements(By.CSS_SELECTOR, "mat-error, .mat-error"):
                    if error.is_displayed():
                        print(f"  Validation error: {error.text}")
            except Exception:
                pass
            raise Exception("Search button is disabled - validation may have failed")

        # Scroll to the button so a native click can land on it.
        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", search_button)
        time.sleep(0.5)

        try:
            search_button.click()
        except Exception as e:
            print(f"⚠ Regular click failed, trying JavaScript click: {e}")
            driver.execute_script("arguments[0].click();", search_button)

        print("Search button clicked")
    except TimeoutException:
        print("⚠ Search button not found or not clickable")
        raise Exception("Could not find or click search button")
    except Exception as e:
        error_msg = str(e) if e else "Unknown error"
        print(f"⚠ Error clicking search button: {error_msg}")
        raise


def fill_search_filters(driver):
    """Fill the DAT load-search form and submit it.

    Sets origin "New York Mills, NY" (DH-O 15), destination "CO"
    (DH-D 150), selects "Flatbeds" as the equipment type, clicks the
    Search button, then waits for the results table to render.

    Args:
        driver: an authenticated Selenium WebDriver on the search page.

    Returns:
        True when the search was submitted and results loaded; False on
        any failure (a screenshot and the page source are saved for
        debugging).
    """
    wait = WebDriverWait(driver, 20)

    try:
        print("\nFilling search filters...")

        # 1. Origin: New York Mills, NY
        print("Filling Origin: New York Mills, NY...")
        origin_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "input[data-test='origin-input']"))
        )
        origin_input.clear()
        origin_input.send_keys("New York Mills, NY")
        time.sleep(2)  # Wait for autocomplete
        _click_first_autocomplete_option(driver, "Origin")

        # 2. DH-O (origin deadhead miles): 15
        print("Filling DH-O: 15...")
        dho_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "input[data-test='dho-input']"))
        )
        dho_input.clear()
        dho_input.send_keys("15")
        time.sleep(0.5)

        # 3. Destination: CO
        print("Filling Destination: CO...")
        dest_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "input[data-test='destination-input']"))
        )
        dest_input.clear()
        dest_input.send_keys("CO")
        time.sleep(2)  # Wait for autocomplete
        _click_first_autocomplete_option(driver, "Destination")

        # 4. DH-D (destination deadhead miles): 150
        print("Filling DH-D: 150...")
        dhd_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "input[data-test='dhd-input']"))
        )
        dhd_input.clear()
        dhd_input.send_keys("150")
        time.sleep(0.5)

        # 5. Equipment Type: Flatbeds (replacing any pre-selected chip).
        _select_equipment_type(driver, wait)

        # 6. Load Type: "Full & Partial" is pre-selected on the page.
        print("Checking Load Type: Full & Partial...")

        # Date Range is already filled, skipping.

        time.sleep(1)  # Brief pause before clicking search

        # 7. Verify equipment type is selected before searching.
        _verify_equipment_chip(driver)

        # 8. Submit the search.
        _click_search_button(driver, wait)

        # Wait for the results table to appear.
        print("Waiting for search results...")
        time.sleep(5)
        wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR,
                "[data-test='results-table-body'], .loads-table, dat-search-table"))
        )
        print("Search results loaded")

        return True

    except Exception as e:
        print(f"Error filling search filters: {e}")
        driver.save_screenshot("filter_error.png")
        with open("filter_error_page_source.html", "w", encoding="utf-8") as f:
            f.write(driver.page_source)
        return False

def scrape_basic_information(driver):
    """Extract basic information from the dat-basic-information section"""
    basic_info = {}
    
    try:
        # Wait for the basic information section to load
        wait = WebDriverWait(driver, 10)
        
        # Find the dat-basic-information section
        basic_info_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-basic-information"))
        )
        
        # Extract Company Name
        try:
            company_name_elem = basic_info_section.find_element(By.CSS_SELECTOR, "div.basic-info__title.section-header__title")
            basic_info["company_name"] = company_name_elem.text.strip()
        except:
            try:
                # Fallback selector
                company_name_elem = basic_info_section.find_element(By.CSS_SELECTOR, ".section-header__title")
                basic_info["company_name"] = company_name_elem.text.strip()
            except:
                basic_info["company_name"] = ""
        
        # Extract Location (subtitle)
        try:
            location_elem = basic_info_section.find_element(By.CSS_SELECTOR, "div.section-header__subtitle")
            location_text = location_elem.text.strip()
            # Remove " in " prefix if present
            if location_text.startswith("in "):
                location_text = location_text[3:].strip()
            basic_info["location"] = location_text
        except:
            basic_info["location"] = ""
        
        # Extract fields from the fields-grid
        try:
            fields_grid = basic_info_section.find_element(By.CSS_SELECTOR, "dat-fields-grid")
            
            # Field mappings: CSS class suffix -> field name
            field_mappings = {
                "docket": "docket",
                "dotNumber": "dot_number",
                "intrastateNumber": "intrastate_number",
                "safetyRating": "safety_rating",
                "creditScore": "credit_score",  # Added for broker pages
                "daysToPay": "days_to_pay",  # Added for broker pages
                "yearFounded": "year_founded",
                "phone": "phone",
                "emailAddress": "email",
                "addressData": "address",
                "operatingStatus": "operating_status",
                "operationType": "operation_type",
                "cargoLimits": "cargo_limits"
            }
            
            for field_class, field_name in field_mappings.items():
                try:
                    # Find the label cell
                    label_cell = fields_grid.find_element(By.CSS_SELECTOR, 
                        f"div.fields-grid__cell--{field_class}.fields-grid__cell--label")
                    
                    # Find the corresponding value cell
                    value_cell = fields_grid.find_element(By.CSS_SELECTOR,
                        f"div.fields-grid__cell--{field_class}.fields-grid__cell--value")
                    
                    # Extract value text
                    value_text = value_cell.text.strip()
                    
                    # For links (docket, dotNumber, phone, email), extract the link text and href
                    if field_class in ["docket", "dotNumber", "phone", "emailAddress"]:
                        try:
                            link = value_cell.find_element(By.CSS_SELECTOR, "a")
                            # Get text from link, excluding any icon text
                            link_text = link.text.strip()
                            # Remove common icon text patterns
                            link_text = link_text.replace("launch", "").strip()
                            # Also try to get text from child nodes excluding mat-icon
                            try:
                                # Get all text nodes, excluding mat-icon
                                link_text_clean = driver.execute_script("""
                                    var link = arguments[0];
                                    var text = '';
                                    for (var i = 0; i < link.childNodes.length; i++) {
                                        var node = link.childNodes[i];
                                        if (node.nodeType === 3) { // Text node
                                            text += node.textContent;
                                        } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                            text += node.textContent || node.innerText || '';
                                        }
                                    }
                                    return text.trim();
                                """, link)
                                if link_text_clean:
                                    link_text = link_text_clean
                            except:
                                pass
                            
                            href = link.get_attribute("href")
                            if link_text:
                                basic_info[field_name] = link_text
                                basic_info[f"{field_name}_link"] = href if href else ""
                            else:
                                basic_info[field_name] = value_text
                        except:
                            basic_info[field_name] = value_text
                    # For cargo limits, extract the list item text
                    elif field_class == "cargoLimits":
                        try:
                            list_item = value_cell.find_element(By.CSS_SELECTOR, "dat-list-item")
                            # Get text excluding icon
                            cargo_text = driver.execute_script("""
                                var item = arguments[0];
                                var text = '';
                                for (var i = 0; i < item.childNodes.length; i++) {
                                    var node = item.childNodes[i];
                                    if (node.nodeType === 3) { // Text node
                                        text += node.textContent;
                                    } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                        text += node.textContent || node.innerText || '';
                                    }
                                }
                                return text.trim();
                            """, list_item)
                            if not cargo_text:
                                cargo_text = list_item.text.strip().replace("done", "").strip()
                            basic_info[field_name] = cargo_text
                        except:
                            basic_info[field_name] = value_text
                    # For address, preserve line breaks as spaces
                    elif field_class == "addressData":
                        # Get text from span, handling <br> tags
                        try:
                            address_span = value_cell.find_element(By.CSS_SELECTOR, "span")
                            # Get text with <br> converted to space
                            address_text = address_span.get_attribute("innerText") or address_span.text
                            address_text = address_text.replace("\n", " ").strip()
                            basic_info[field_name] = address_text
                        except:
                            # Fallback to regular text extraction
                            address_text = value_text.replace("\n", " ").strip()
                            basic_info[field_name] = address_text
                    else:
                        basic_info[field_name] = value_text
                        
                except NoSuchElementException:
                    # Field not found, set to empty
                    basic_info[field_name] = ""
                except Exception as e:
                    print(f"  ⚠ Error extracting {field_name}: {e}")
                    basic_info[field_name] = ""
        
        except Exception as e:
            print(f"  ⚠ Error extracting fields-grid: {e}")
    
    except TimeoutException:
        print("  ⚠ Basic information section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping basic information: {e}")
        import traceback
        traceback.print_exc()
    
    return basic_info

def _find_grid_cell(fields_grid, field_class, role):
    """Locate a grid cell (*role* is 'label' or 'value') for *field_class*.

    Tries the exact combined class first, then scans all cells of that
    role and matches the field class loosely against the class attribute
    (some pages use grid-area placement instead of the exact class).
    Returns None when no matching cell exists.
    """
    try:
        return fields_grid.find_element(By.CSS_SELECTOR,
            f"div.fields-grid__cell--{field_class}.fields-grid__cell--{role}")
    except Exception:
        pass
    try:
        for cell in fields_grid.find_elements(By.CSS_SELECTOR,
                f"div.fields-grid__cell--{role}"):
            cell_classes = cell.get_attribute("class") or ""
            if field_class in cell_classes or field_class.replace("_", "") in cell_classes:
                return cell
    except Exception:
        pass
    return None


def _find_value_cell_by_label_text(fields_grid, field_class):
    """Last-resort value-cell lookup: find the label cell by its visible
    text and take the sibling value cell under the same parent.
    Returns None when the label text is unknown or nothing matches."""
    label_text_map = {
        "creditScore": "Credit score:",
        "daysToPay": "Days to pay:",
        "docket": "Docket:",
        "dotNumber": "DOT Number:",
        "phone": "Phone:",
        "emailAddress": "Email:",
        "addressData": "Address:",
        "operatingStatus": "Operating status:",
        "operationType": "Operation type:"
    }
    label_text = label_text_map.get(field_class)
    if not label_text:
        return None
    try:
        label_xpath = (f".//div[contains(@class, 'fields-grid__cell--label') "
                       f"and contains(text(), '{label_text}')]")
        label_by_text = fields_grid.find_element(By.XPATH, label_xpath)
        parent = label_by_text.find_element(By.XPATH, "./..")
        return parent.find_element(By.CSS_SELECTOR,
            f"div.fields-grid__cell--{field_class}.fields-grid__cell--value, div.fields-grid__cell--value")
    except Exception:
        return None


def extract_fields_from_grid(section_element, field_mappings, driver):
    """Extract label/value pairs from a <dat-fields-grid> in *section_element*.

    Args:
        section_element: WebElement containing the grid.
        field_mappings: dict of CSS class suffix -> output key.
        driver: WebDriver, used to run JS that strips icon text.

    Returns:
        dict keyed by the mapped names; fields that cannot be located map
        to "". Hyperlinked fields additionally get a ``<name>_link``
        entry carrying the href.
    """
    data = {}
    try:
        fields_grid = section_element.find_element(By.CSS_SELECTOR, "dat-fields-grid")

        for field_class, field_name in field_mappings.items():
            try:
                label_cell = _find_grid_cell(fields_grid, field_class, "label")
                if label_cell is None:
                    # No label -> field absent on this page.
                    data[field_name] = ""
                    continue

                value_cell = _find_grid_cell(fields_grid, field_class, "value")
                if value_cell is None:
                    value_cell = _find_value_cell_by_label_text(fields_grid, field_class)
                if value_cell is None:
                    data[field_name] = ""
                    continue

                try:
                    value_text = value_cell.text.strip()
                except Exception:
                    value_text = ""

                if field_class in ["docket", "dotNumber", "phone", "emailAddress", "officePhone",
                                   "businessPhone", "mailingPhone", "insurerPhone"]:
                    # Hyperlinked value: keep the link text (minus icon
                    # ligatures like "launch") and the href.
                    try:
                        link = value_cell.find_element(By.CSS_SELECTOR, "a")
                        link_text = driver.execute_script("""
                            var link = arguments[0];
                            var text = '';
                            for (var i = 0; i < link.childNodes.length; i++) {
                                var node = link.childNodes[i];
                                if (node.nodeType === 3) {
                                    text += node.textContent;
                                } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                    text += node.textContent || node.innerText || '';
                                }
                            }
                            return text.trim();
                        """, link)
                        if not link_text:
                            link_text = link.text.strip().replace("launch", "").strip()
                        href = link.get_attribute("href")
                        if link_text:
                            data[field_name] = link_text
                            data[f"{field_name}_link"] = href if href else ""
                        else:
                            data[field_name] = value_text
                    except Exception:
                        data[field_name] = value_text
                elif "Limits" in field_name or "commodities" in field_name.lower() or "services" in field_name.lower():
                    # List-valued field: join items with "; ", dropping
                    # each item's icon text.
                    try:
                        list_items = value_cell.find_elements(By.CSS_SELECTOR, "dat-list-item")
                        if list_items:
                            items_text = []
                            for item in list_items:
                                item_text = driver.execute_script("""
                                    var item = arguments[0];
                                    var text = '';
                                    for (var i = 0; i < item.childNodes.length; i++) {
                                        var node = item.childNodes[i];
                                        if (node.nodeType === 3) {
                                            text += node.textContent;
                                        } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                            var span = node.querySelector('span');
                                            if (span) text += span.textContent || span.innerText || '';
                                            else text += node.textContent || node.innerText || '';
                                        }
                                    }
                                    return text.trim();
                                """, item)
                                if item_text:
                                    items_text.append(item_text)
                            if items_text:
                                data[field_name] = "; ".join(items_text)
                            else:
                                data[field_name] = value_text
                        else:
                            data[field_name] = value_text
                    except Exception:
                        data[field_name] = value_text
                elif "address" in field_name.lower() or "Address" in field_name:
                    # Flatten <br>-separated address lines to one line.
                    try:
                        address_span = value_cell.find_element(By.CSS_SELECTOR, "span")
                        address_text = address_span.get_attribute("innerText") or address_span.text
                        data[field_name] = address_text.replace("\n", " ").strip()
                    except Exception:
                        data[field_name] = value_text.replace("\n", " ").strip()
                else:
                    data[field_name] = value_text

            except Exception:
                # Any per-field failure degrades to an empty value.
                data[field_name] = ""

    except Exception as e:
        print(f"  ⚠ Error extracting fields-grid: {e}")

    return data

def scrape_office_information(driver):
    """Extract office information from the dat-office-information section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: Flattened office fields (possibly including JSON-encoded
        parent-account links); empty if the section is missing.
    """
    office_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        office_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-office-information"))
        )

        # Grid field name -> output key.
        field_mappings = {
            "companyName": "office_company_name",
            "parent": "parent_account",
            "companyType": "company_type",
            "addressData": "office_address",
            "officePhone": "office_phone",
            "officeFax": "office_fax",
            "membership": "membership_affiliations"
        }

        office_info = extract_fields_from_grid(office_section, field_mappings, driver)

        # Parent account links (optional). Stored as a JSON string so the
        # flat dict stays CSV/Excel friendly.
        try:
            parent_field = office_section.find_element(By.CSS_SELECTOR,
                "div.fields-grid__cell--parent.fields-grid__cell--value")
            parent_links = parent_field.find_elements(By.CSS_SELECTOR, "a")
            parent_info = [
                {"text": link.text.strip(), "href": link.get_attribute("href")}
                for link in parent_links
            ]
            if parent_info:
                office_info["parent_account_links"] = json.dumps(parent_info, ensure_ascii=False)
        except Exception:
            # Field is optional; absence is not an error.
            pass

        # Membership logos/links (optional).
        try:
            membership_field = office_section.find_element(By.CSS_SELECTOR,
                "div.fields-grid__cell--membership.fields-grid__cell--value")
            membership_links = membership_field.find_elements(By.CSS_SELECTOR, "a.membership-link")
            membership_list = []
            for link in membership_links:
                try:
                    img = link.find_element(By.CSS_SELECTOR, "img")
                    alt_text = img.get_attribute("alt") or ""
                    href = link.get_attribute("href") or ""
                    membership_list.append(f"{alt_text} ({href})" if href else alt_text)
                except Exception:
                    pass
            if membership_list:
                office_info["membership_details"] = "; ".join(membership_list)
        except Exception:
            pass

    except TimeoutException:
        print("  ⚠ Office information section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping office information: {e}")

    return office_info

def scrape_review_summary(driver):
    """Extract review summary (rating, count, percent recommend) from the
    dat-review-summary-section element.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: Review fields; values default to "" when not found, and the
        dict is empty if the whole section is missing.
    """
    review_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        review_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-review-summary-section"))
        )

        # Average rating: schema.org itemprop or the review-widget container.
        try:
            rating_elem = review_section.find_element(By.CSS_SELECTOR,
                "div[itemprop='ratingValue'], .bv_avgRating_component_container")
            review_info["average_rating"] = rating_elem.text.strip()
        except Exception:
            review_info["average_rating"] = ""

        # Review count: may be a <meta> tag (content attribute) or visible text.
        try:
            review_count_elem = review_section.find_element(By.CSS_SELECTOR,
                "meta[itemprop='reviewCount'], .bv_numReviews_text")
            if review_count_elem.tag_name == "meta":
                review_info["review_count"] = review_count_elem.get_attribute("content")
            else:
                review_info["review_count"] = review_count_elem.text.strip()
        except Exception:
            review_info["review_count"] = ""

        # Percent of reviewers who recommend.
        try:
            percent_elem = review_section.find_element(By.CSS_SELECTOR,
                ".bv_percentRecommend_component_container")
            review_info["percent_recommend"] = percent_elem.text.strip()
        except Exception:
            review_info["percent_recommend"] = ""

    except TimeoutException:
        print("  ⚠ Review summary section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping review summary: {e}")

    return review_info

def scrape_general_information(driver):
    """Scrape the dat-dot-profile panel (general / DOT profile data).

    Returns a flat dict of field values; empty when the panel is missing
    or an unexpected error occurs.
    """
    results = {}
    try:
        section = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-dot-profile"))
        )

        # (grid field name, output key) pairs fed to the shared grid extractor.
        key_pairs = [
            ("docket", "general_docket"),
            ("authorityName", "authority_name"),
            ("dotNumber", "general_dot_number"),
            ("intrastateNumber", "general_intrastate_number"),
            ("entityType", "entity_type"),
            ("dbaName", "dba_name"),
            ("operationType", "general_operation_type"),
            ("operatingStatus", "general_operating_status"),
            ("businessAddressData", "business_address"),
            ("powerUnits", "power_units"),
            ("drivers", "drivers"),
            ("businessPhone", "business_phone"),
            ("businessFax", "business_fax"),
            ("mcs150FormDate", "mcs150_form_date"),
            ("scacCodes", "scac_codes"),
            ("mailingAddressData", "mailing_address"),
            ("mcs150Mileage", "mcs150_mileage"),
            ("mailingPhone", "mailing_phone"),
            ("mailingFax", "mailing_fax"),
            ("outOfInterstateServices", "out_of_interstate_services"),
            ("commodities", "commodities"),
            ("specialCommodities", "special_commodities"),
        ]

        results = extract_fields_from_grid(section, dict(key_pairs), driver)

    except TimeoutException:
        print("  ⚠ General information section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping general information: {e}")

    return results

def _grid_cell_text(root, selector):
    """Return stripped text of the first element under *root* matching
    *selector*, or "" when it is absent."""
    try:
        return root.find_element(By.CSS_SELECTOR, selector).text.strip()
    except Exception:
        return ""

def _authority_status_fields(dot_section):
    """Collect DOT authority status rows (plus freight/passenger/hhg flags)
    from the authority-status grid. Returns a possibly-empty dict."""
    info = {}
    try:
        authority_grid = dot_section.find_element(By.CSS_SELECTOR,
            ".child-section__authority-status dat-fields-grid")

        authority_cells = authority_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--authority.fields-grid__cell--value")
        status_cells = authority_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--status.fields-grid__cell--value")
        pending_cells = authority_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--applicationPending.fields-grid__cell--value")

        authorities = []
        for i, auth_cell in enumerate(authority_cells):
            auth_type = auth_cell.text.strip()
            # The status/pending columns may have fewer cells than the
            # authority column; default missing entries to "".
            status = status_cells[i].text.strip() if i < len(status_cells) else ""
            pending = pending_cells[i].text.strip() if i < len(pending_cells) else ""
            authorities.append(f"{auth_type}: {status} (Pending: {pending})")

        if authorities:
            info["dot_authorities"] = "; ".join(authorities)

        # Freight / Passenger / HHG flags live in the same grid. All three
        # are looked up before any is stored, so a partial row is skipped.
        try:
            freight = authority_grid.find_element(By.CSS_SELECTOR,
                "div.fields-grid__cell--freight.fields-grid__cell--value").text.strip()
            passenger = authority_grid.find_element(By.CSS_SELECTOR,
                "div.fields-grid__cell--passenger.fields-grid__cell--value").text.strip()
            hhg = authority_grid.find_element(By.CSS_SELECTOR,
                "div.fields-grid__cell--hhg.fields-grid__cell--value").text.strip()
            info["freight"] = freight
            info["passenger"] = passenger
            info["hhg"] = hhg
        except Exception:
            pass
    except Exception:
        pass
    return info

def _insurance_requirement_fields(dot_section):
    """Collect DOT insurance-requirement rows. Returns a possibly-empty dict."""
    info = {}
    try:
        req_grid = dot_section.find_element(By.CSS_SELECTOR,
            ".child-section__insurance-requirements dat-fields-grid")

        ins_type_cells = req_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--insType.fields-grid__cell--value")
        required_cells = req_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--required.fields-grid__cell--value")
        on_file_cells = req_grid.find_elements(By.CSS_SELECTOR,
            "div.fields-grid__cell--onFile.fields-grid__cell--value")

        reqs = []
        for i, ins_type in enumerate(ins_type_cells):
            ins_type_text = ins_type.text.strip()
            required = required_cells[i].text.strip() if i < len(required_cells) else ""
            on_file = on_file_cells[i].text.strip() if i < len(on_file_cells) else ""
            reqs.append(f"{ins_type_text}: Required={required}, On File={on_file}")

        if reqs:
            info["insurance_requirements"] = "; ".join(reqs)
    except Exception:
        pass
    return info

def _active_insurance_fields(dot_section):
    """Collect active/pending insurance policies as a JSON-encoded list.
    Returns a possibly-empty dict."""
    info = {}
    try:
        items = dot_section.find_elements(By.CSS_SELECTOR,
            "dat-dot-insurances .dot-insurance__item")

        insurance_list = []
        for item in items:
            # The name is mandatory; skip the item entirely if it is missing.
            try:
                insurance_name = item.find_element(By.CSS_SELECTOR,
                    "div.fields-grid__cell--name.fields-grid__cell--value").text.strip()
            except Exception:
                continue

            policy = _grid_cell_text(item,
                "div.fields-grid__cell--policy.fields-grid__cell--value")

            # Coverage is only reported when both endpoints are present.
            try:
                coverage_from = item.find_element(By.CSS_SELECTOR,
                    "div.fields-grid__cell--coverageFrom.fields-grid__cell--value").text.strip()
                coverage_to = item.find_element(By.CSS_SELECTOR,
                    "div.fields-grid__cell--coverageTo.fields-grid__cell--value").text.strip()
                coverage = f"{coverage_from} - {coverage_to}"
            except Exception:
                coverage = ""

            carrier = _grid_cell_text(item, ".dot-insurance__company-name")
            effective_date = _grid_cell_text(item,
                "div.fields-grid__cell--effectiveDate.fields-grid__cell--value")

            insurance_list.append({
                "name": insurance_name,
                "policy": policy,
                "coverage": coverage,
                "carrier": carrier,
                "effective_date": effective_date
            })

        if insurance_list:
            info["active_insurance"] = json.dumps(insurance_list, ensure_ascii=False)
    except Exception:
        pass
    return info

def scrape_dot_authority_insurance(driver):
    """Extract DOT Authority & Insurance from the
    dat-dot-authority-and-insurance section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: Authority statuses, insurance requirements, and active
        policies (JSON string); empty if the section is missing.
    """
    dot_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        dot_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-dot-authority-and-insurance"))
        )

        # The section subtitle carries the "as of" date for the data below.
        try:
            subtitle = dot_section.find_element(By.CSS_SELECTOR, ".section-header__subtitle")
            dot_info["dot_info_date"] = subtitle.text.strip()
        except Exception:
            pass

        dot_info.update(_authority_status_fields(dot_section))
        dot_info.update(_insurance_requirement_fields(dot_section))
        dot_info.update(_active_insurance_fields(dot_section))

    except TimeoutException:
        print("  ⚠ DOT Authority & Insurance section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping DOT Authority & Insurance: {e}")

    return dot_info

def scrape_insurance_information(driver):
    """Scrape the dat-insurance-information panel into a flat dict.

    Returns an empty dict when the panel is absent or an error occurs.
    """
    results = {}
    try:
        section = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-insurance-information"))
        )

        # (grid field name, output key) pairs fed to the shared grid extractor.
        key_pairs = [
            ("quickRating", "quick_rating"),
            ("cargoStatus", "cargo_status"),
            ("phone", "insurance_phone"),
            ("fax", "insurance_fax"),
            ("inTheNameOf", "insurance_in_name_of"),
            ("addressData", "insurance_address"),
            ("email", "insurance_email"),
            ("cargoLimits", "insurance_cargo_limits"),
        ]

        results = extract_fields_from_grid(section, dict(key_pairs), driver)

    except TimeoutException:
        print("  ⚠ Insurance information section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping insurance information: {e}")

    return results

def scrape_company_background(driver):
    """Scrape the dat-background-information panel into a flat dict.

    Returns an empty dict when the panel is absent or an error occurs.
    """
    results = {}
    try:
        section = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-background-information"))
        )

        # (grid field name, output key) pairs fed to the shared grid extractor.
        key_pairs = [
            ("primaryContact", "primary_contact"),
            ("primaryTitle", "primary_title"),
            ("corporationType", "corporation_type"),
            ("phone", "background_phone"),
            ("fax", "background_fax"),
            ("emailAddress", "background_email"),
            ("addressData", "background_address"),
            ("yearFounded", "background_year_founded"),
            ("scacCodes", "background_scac_codes"),
            ("veteranOwned", "veteran_owned"),
            ("womanOwned", "woman_owned"),
            ("minorityOwned", "minority_owned"),
            ("minorityType", "minority_type"),
            ("certifiedByNmsdc", "certified_by_nmsdc"),
            ("locations", "other_locations"),
            ("background", "background_text"),
        ]

        results = extract_fields_from_grid(section, dict(key_pairs), driver)

    except TimeoutException:
        print("  ⚠ Company background section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping company background: {e}")

    return results

def scrape_services(driver):
    """Extract offered services and related fields from the dat-services section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: "services" joined with "; " plus extra grid-backed fields;
        empty if the section is missing.
    """
    services_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        services_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-services"))
        )

        # Services list: concatenate text nodes and non-icon child elements
        # so Material icon glyph text is excluded from each label.
        try:
            services_list = services_section.find_elements(By.CSS_SELECTOR,
                ".section-list dat-list-item")
            services = []
            for service_item in services_list:
                service_text = driver.execute_script("""
                    var item = arguments[0];
                    var text = '';
                    for (var i = 0; i < item.childNodes.length; i++) {
                        var node = item.childNodes[i];
                        if (node.nodeType === 3) {
                            text += node.textContent;
                        } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                            text += node.textContent || node.innerText || '';
                        }
                    }
                    return text.trim();
                """, service_item)
                if service_text:
                    services.append(service_text)

            if services:
                services_info["services"] = "; ".join(services)
        except Exception:
            # Only set an explicit empty value when extraction itself failed.
            services_info["services"] = ""

        # Additional grid-backed fields.
        field_mappings = {
            "portsServiced": "ports_serviced",
            "airportsServiced": "airports_serviced",
            "railServiced": "railway_yards_serviced",
            "specialServices": "other_special_services"
        }

        services_info.update(extract_fields_from_grid(services_section, field_mappings, driver))

    except TimeoutException:
        print("  ⚠ Services section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping services: {e}")

    return services_info

def scrape_safety_data(driver):
    """Extract safety ratings and inspection/crash totals from dat-safety-data.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: Safety rating fields plus joined inspection/crash totals;
        empty if the section is missing.
    """
    safety_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        safety_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-safety-data"))
        )

        # Grid field name -> output key.
        field_mappings = {
            "currentRating": "safety_rating_current",
            "currentEffectiveDate": "safety_rating_effective_date",
            "reviewType": "safety_review_type",
            "reviewDate": "safety_review_date",
            "previousRating": "safety_rating_previous",
            "previousEffectiveDate": "safety_rating_previous_effective_date",
            "csaLink": "safety_csa_link"
        }

        safety_info = extract_fields_from_grid(safety_section, field_mappings, driver)

        # Total inspections per category, joined with "; ".
        try:
            total_inspections = safety_section.find_elements(By.CSS_SELECTOR,
                ".total-inspections .fields-grid__cell--total.fields-grid__cell--value")
            if total_inspections:
                inspection_values = [cell.text.strip() for cell in total_inspections]
                safety_info["total_inspections"] = "; ".join(inspection_values)
        except Exception:
            pass

        # Crash counts per category, joined with "; ".
        try:
            crashes = safety_section.find_elements(By.CSS_SELECTOR,
                ".crashes .fields-grid__cell--total.fields-grid__cell--value")
            if crashes:
                crash_values = [cell.text.strip() for cell in crashes]
                safety_info["total_crashes"] = "; ".join(crash_values)
        except Exception:
            pass

    except TimeoutException:
        print("  ⚠ Safety data section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping safety data: {e}")

    return safety_info

def scrape_smartway_data(driver):
    """Extract SmartWay participation data from the dat-smartway section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: SmartWay fields, or {"smartway_data": "No data provided"}
        when the page shows the no-data placeholder; empty if the section
        is missing.
    """
    smartway_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        smartway_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-smartway"))
        )

        # A ".no-data" placeholder means nothing was reported; only fall
        # back to grid extraction when that placeholder is absent.
        try:
            smartway_section.find_element(By.CSS_SELECTOR, ".no-data")
            smartway_info["smartway_data"] = "No data provided"
        except NoSuchElementException:
            field_mappings = {
                "participationStatus": "smartway_participation_status",
                "partnerType": "smartway_partner_type"
            }
            smartway_info = extract_fields_from_grid(smartway_section, field_mappings, driver)

    except TimeoutException:
        print("  ⚠ SmartWay data section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping SmartWay data: {e}")

    return smartway_info

def scrape_equipment_information(driver):
    """Scrape the dat-equipment-information panel into a flat dict.

    Returns an empty dict when the panel is absent or an error occurs.
    """
    results = {}
    try:
        section = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-equipment-information"))
        )

        # (grid field name, output key) pairs fed to the shared grid extractor.
        key_pairs = [
            ("powerUnits", "equipment_power_units"),
            ("ownerOperators", "equipment_owner_operators"),
            ("companyDrivers", "equipment_company_drivers"),
            ("numberOfTeams", "equipment_number_of_teams"),
            ("onBoardCommunications", "equipment_onboard_communications"),
            ("satelliteTracking", "equipment_satellite_tracking"),
            ("types", "equipment_types"),
        ]

        results = extract_fields_from_grid(section, dict(key_pairs), driver)

    except TimeoutException:
        print("  ⚠ Equipment information section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping equipment information: {e}")

    return results

def scrape_high_risk_freight(driver):
    """Extract high-risk freight data from the dat-high-risk-freight section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: High-risk freight fields, or
        {"high_risk_freight": "No data provided"} when the no-data
        placeholder is shown; empty if the section is missing.
    """
    highrisk_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        highrisk_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-high-risk-freight"))
        )

        # A ".no-data" placeholder means nothing was reported; only fall
        # back to grid extraction when that placeholder is absent.
        try:
            highrisk_section.find_element(By.CSS_SELECTOR, ".no-data")
            highrisk_info["high_risk_freight"] = "No data provided"
        except NoSuchElementException:
            field_mappings = {
                "hazmat": "high_risk_hazmat",
                "explosives": "high_risk_explosives"
            }
            highrisk_info = extract_fields_from_grid(highrisk_section, field_mappings, driver)

    except TimeoutException:
        print("  ⚠ High-risk freight section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping high-risk freight: {e}")

    return highrisk_info

def scrape_certificates(driver):
    """Extract certificate names from the dat-certificates section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: {"certificates": "..."} with names joined by "; ", or
        "No data provided" when the placeholder is shown; empty if the
        section is missing.
    """
    certificates_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        certificates_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-certificates"))
        )

        # A ".no-data" placeholder means nothing was reported; otherwise
        # pull the list items.
        try:
            certificates_section.find_element(By.CSS_SELECTOR, ".no-data")
            certificates_info["certificates"] = "No data provided"
        except NoSuchElementException:
            try:
                cert_items = certificates_section.find_elements(By.CSS_SELECTOR, "dat-list-item")
                certs = []
                for item in cert_items:
                    # Concatenate text nodes and non-icon child elements so
                    # Material icon glyph text is excluded from the label.
                    cert_text = driver.execute_script("""
                        var item = arguments[0];
                        var text = '';
                        for (var i = 0; i < item.childNodes.length; i++) {
                            var node = item.childNodes[i];
                            if (node.nodeType === 3) {
                                text += node.textContent;
                            } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                text += node.textContent || node.innerText || '';
                            }
                        }
                        return text.trim();
                    """, item)
                    if cert_text:
                        certs.append(cert_text)
                if certs:
                    certificates_info["certificates"] = "; ".join(certs)
            except Exception:
                pass

    except TimeoutException:
        print("  ⚠ Certificates section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping certificates: {e}")

    return certificates_info

def scrape_cross_border_certifications(driver):
    """Scrape the dat-cross-border-certifications panel into a flat dict.

    Returns an empty dict when the panel is absent or an error occurs.
    """
    results = {}
    try:
        section = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-cross-border-certifications"))
        )

        # (grid field name, output key) pairs fed to the shared grid extractor.
        key_pairs = [
            ("fastCertified", "crossborder_fast_certified"),
            ("csaApproved", "crossborder_csa_approved"),
            ("serviceToMexico", "crossborder_service_to_mexico"),
            ("partnersInProtection", "crossborder_partners_in_protection"),
            ("C_TPAT", "crossborder_ctpat"),
            ("ACE", "crossborder_ace"),
        ]

        results = extract_fields_from_grid(section, dict(key_pairs), driver)

    except TimeoutException:
        print("  ⚠ Cross-border certifications section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping cross-border certifications: {e}")

    return results

def scrape_operating_areas(driver):
    """Extract operating areas from the dat-operating-areas section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: {"operating_areas": "..."} with areas joined by "; ", or
        "No data provided" when the placeholder is shown; empty if the
        section is missing.
    """
    operating_areas_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        operating_areas_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-operating-areas"))
        )

        # A ".no-data" placeholder means nothing was reported; otherwise
        # pull the list items.
        try:
            operating_areas_section.find_element(By.CSS_SELECTOR, ".no-data")
            operating_areas_info["operating_areas"] = "No data provided"
        except NoSuchElementException:
            try:
                area_items = operating_areas_section.find_elements(By.CSS_SELECTOR, "dat-list-item")
                areas = []
                for item in area_items:
                    # Concatenate text nodes and non-icon child elements so
                    # Material icon glyph text is excluded from the label.
                    area_text = driver.execute_script("""
                        var item = arguments[0];
                        var text = '';
                        for (var i = 0; i < item.childNodes.length; i++) {
                            var node = item.childNodes[i];
                            if (node.nodeType === 3) {
                                text += node.textContent;
                            } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                text += node.textContent || node.innerText || '';
                            }
                        }
                        return text.trim();
                    """, item)
                    if area_text:
                        areas.append(area_text)
                if areas:
                    operating_areas_info["operating_areas"] = "; ".join(areas)
            except Exception:
                pass

    except TimeoutException:
        print("  ⚠ Operating areas section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping operating areas: {e}")

    return operating_areas_info

def scrape_preferred_lanes(driver):
    """Extract preferred lanes from the dat-preferred-lanes section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: {"preferred_lanes": "..."} with lanes joined by "; ", or
        "No data provided" when the placeholder is shown; empty if the
        section is missing.
    """
    preferred_lanes_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        preferred_lanes_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-preferred-lanes"))
        )

        # A ".no-data" placeholder means nothing was reported; otherwise
        # pull the list items.
        try:
            preferred_lanes_section.find_element(By.CSS_SELECTOR, ".no-data")
            preferred_lanes_info["preferred_lanes"] = "No data provided"
        except NoSuchElementException:
            try:
                lane_items = preferred_lanes_section.find_elements(By.CSS_SELECTOR, "dat-list-item")
                lanes = []
                for item in lane_items:
                    # Concatenate text nodes and non-icon child elements so
                    # Material icon glyph text is excluded from the label.
                    lane_text = driver.execute_script("""
                        var item = arguments[0];
                        var text = '';
                        for (var i = 0; i < item.childNodes.length; i++) {
                            var node = item.childNodes[i];
                            if (node.nodeType === 3) {
                                text += node.textContent;
                            } else if (node.nodeType === 1 && !node.classList.contains('mat-icon')) {
                                text += node.textContent || node.innerText || '';
                            }
                        }
                        return text.trim();
                    """, item)
                    if lane_text:
                        lanes.append(lane_text)
                if lanes:
                    preferred_lanes_info["preferred_lanes"] = "; ".join(lanes)
            except Exception:
                pass

    except TimeoutException:
        print("  ⚠ Preferred lanes section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping preferred lanes: {e}")

    return preferred_lanes_info

def scrape_credit_profile(driver):
    """Extract the credit profile (credit score, days-to-pay, etc.) from
    the dat-credit-profile section.

    Args:
        driver: Selenium WebDriver positioned on a company detail page.

    Returns:
        dict: Credit fields keyed credit_score / days_to_pay / etc.;
        empty if the section is missing.
    """
    credit_info = {}
    try:
        wait = WebDriverWait(driver, 10)
        credit_section = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-credit-profile"))
        )

        field_mappings = {
            "name": "credit_name_on_record",
            "creditScore": "credit_score",  # primary credit-score field
            "daysToPay": "days_to_pay",     # primary days-to-pay field
            "creditHistory": "credit_history",
            "orderCredit": "order_credit_from"
        }

        credit_info = extract_fields_from_grid(credit_section, field_mappings, driver)

        # Direct fallbacks when grid extraction missed the key fields.
        # NOTE: the former "alternative" selectors prefixed with `div`
        # matched a strict subset of these class-only selectors, so the
        # nested retries were dead code and have been removed.
        if not credit_info.get("credit_score"):
            try:
                credit_info["credit_score"] = credit_section.find_element(
                    By.CSS_SELECTOR,
                    ".fields-grid__cell--creditScore.fields-grid__cell--value"
                ).text.strip()
            except Exception:
                pass

        if not credit_info.get("days_to_pay"):
            try:
                credit_info["days_to_pay"] = credit_section.find_element(
                    By.CSS_SELECTOR,
                    ".fields-grid__cell--daysToPay.fields-grid__cell--value"
                ).text.strip()
            except Exception:
                pass

        # Debug aid: show which credit fields were captured.
        if credit_info:
            print(f"    Credit profile extracted: {list(credit_info.keys())}")

    except TimeoutException:
        print("  ⚠ Credit profile section not found")
    except Exception as e:
        print(f"  ⚠ Error scraping credit profile: {e}")

    return credit_info

def scrape_detail_page(driver):
    """Scrape detailed data from a company/load detail page.

    Runs every section scraper against the currently loaded detail page and
    merges all of their fields into one flat dict.

    Args:
        driver: Selenium WebDriver already navigated to the detail page.

    Returns:
        dict containing url/title/timestamp plus every field the section
        scrapers extracted; a "scraping_error" key is added if anything raised.
    """
    detail_data = {
        "url": driver.current_url,
        "title": driver.title,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
    }

    wait = WebDriverWait(driver, 10)

    # (progress label, field-count label, scraper) — table-driven so the
    # extract/print/update boilerplate isn't repeated once per section.
    # Labels reproduce the original log messages exactly.
    sections = [
        ("basic information", "basic info", scrape_basic_information),
        ("office information", "office info", scrape_office_information),
        ("review summary", "review info", scrape_review_summary),
        ("general information", "general info", scrape_general_information),
        ("DOT Authority & Insurance", "DOT info", scrape_dot_authority_insurance),
        ("insurance information", "insurance info", scrape_insurance_information),
        ("company background", "background info", scrape_company_background),
        ("services", "services info", scrape_services),
        ("safety data", "safety info", scrape_safety_data),
        ("SmartWay data", "SmartWay info", scrape_smartway_data),
        ("equipment information", "equipment info", scrape_equipment_information),
        ("high-risk freight", "high-risk freight info", scrape_high_risk_freight),
        ("certificates", "certificates info", scrape_certificates),
        ("cross-border certifications", "cross-border info", scrape_cross_border_certifications),
        ("operating areas", "operating areas info", scrape_operating_areas),
        ("preferred lanes", "preferred lanes info", scrape_preferred_lanes),
        # Important for creditScore and daysToPay
        ("credit profile", "credit profile info", scrape_credit_profile),
    ]

    try:
        # Give the SPA a moment to render before probing for content
        time.sleep(2)

        # Verify the page has loaded by checking for key elements
        try:
            wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "dat-company-profile, dat-basic-information")))
            print("  ✓ Page content verified")
        except TimeoutException:
            print("  ⚠ Warning: Page content may not have loaded")
            # Save page source for debugging
            try:
                with open("detail_page_timeout.html", "w", encoding="utf-8") as f:
                    f.write(driver.page_source)
                print("  Saved timeout page source to detail_page_timeout.html")
            except Exception:
                # Best-effort debug dump; never let it abort the scrape
                pass

        # Run every section scraper and merge its fields
        for progress_label, count_label, scraper in sections:
            print(f"  Extracting {progress_label}...")
            section_data = scraper(driver)
            print(f"    Extracted {len(section_data)} {count_label} fields")
            detail_data.update(section_data)

        # Count total fields extracted (excluding bookkeeping keys)
        total_fields = len([k for k in detail_data.keys() if k not in ["url", "title", "timestamp", "scraping_error"]])
        print(f"  ✓ Scraped {total_fields} fields from all sections")

        # Debug: Print sample of extracted keys
        sample_keys = list(detail_data.keys())[:20]  # First 20 keys
        print(f"  Sample extracted fields: {sample_keys}")
        if total_fields < 10:
            print("  ⚠ WARNING: Very few fields extracted! This might indicate a problem.")
            print(f"  All extracted keys: {list(detail_data.keys())}")

    except Exception as e:
        print(f"  ⚠ Error scraping detail page: {e}")
        import traceback
        traceback.print_exc()
        detail_data["scraping_error"] = str(e)

    return detail_data

def flatten_dict_for_csv(data, parent_key='', sep='_'):
    """Flatten a nested dictionary for CSV export.

    Nested dicts are merged under sep-joined keys; a list of dicts becomes a
    JSON string, and any other list becomes a '; '-joined string.
    """
    flat = {}
    for key, value in data.items():
        full_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            # Recurse and merge the child's flattened keys
            flat.update(flatten_dict_for_csv(value, full_key, sep=sep))
        elif isinstance(value, list):
            if value and isinstance(value[0], dict):
                # List of dicts - serialize to a JSON string
                flat[full_key] = json.dumps(value, ensure_ascii=False)
            else:
                # List of scalars - join with semicolons
                flat[full_key] = '; '.join(str(entry) for entry in value)
        else:
            flat[full_key] = value
    return flat

def scrape_origin_details(driver, row_element):
    """Scrape origin details from the expanded row detail section.

    Locates the expanded <dat-load-details> panel and extracts trip,
    equipment, rate, contact, comment, company, and market-rate fields.

    Args:
        driver: Selenium WebDriver with a search-results row expanded.
        row_element: the row WebElement that was clicked; used as a fallback
            root when locating the detail panel.

    Returns:
        dict of extracted fields. If the panel is missing the dict is empty;
        if a section is present but one of its fields is missing, that field
        defaults to ""; absent sections contribute no keys (matching the
        original per-field try/except behavior).
    """
    origin_data = {}

    def _text(root, css):
        # Stripped text of the first match under `root`, or None when absent.
        try:
            return root.find_element(By.CSS_SELECTOR, css).text.strip()
        except Exception:
            return None

    try:
        wait = WebDriverWait(driver, 5)  # Reduced timeout to prevent getting stuck

        # Find the expanded detail section (dat-load-details). The panel may
        # be a sibling of the row, inside it, or attached elsewhere in the
        # DOM, so try each location in order.
        detail_section = None
        try:
            detail_section = driver.find_element(
                By.CSS_SELECTOR,
                ".expanded-detail-row dat-load-details, .table-row-detail dat-load-details")
        except Exception:
            try:
                # Try finding it within the row
                detail_section = row_element.find_element(By.CSS_SELECTOR, "dat-load-details")
            except Exception:
                try:
                    # It might be dynamically added - wait for it to appear
                    detail_section = wait.until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "dat-load-details"))
                    )
                except TimeoutException:
                    try:
                        # Last resort: find any dat-load-details on the page
                        detail_section = driver.find_element(By.CSS_SELECTOR, "dat-load-details")
                    except Exception:
                        print("  ⚠ Could not find expanded detail section (dat-load-details)")
                        return origin_data

        if not detail_section:
            print("  ⚠ Detail section is None")
            return origin_data

        def _fill(section_css, fields):
            # Populate origin_data from one sub-section of the panel: skip the
            # whole section when its root is missing; otherwise store each
            # field's text, defaulting to "" when the field element is absent.
            try:
                section = detail_section.find_element(By.CSS_SELECTOR, section_css)
            except Exception:
                return
            for key, css in fields:
                value = _text(section, css)
                origin_data[key] = value if value is not None else ""

        # Trip summary from details-header (keys set only when found, to match
        # the original behavior for the header_* fields)
        try:
            details_header = detail_section.find_element(By.CSS_SELECTOR, "dat-details-header")
            trip_place = details_header.find_element(By.CSS_SELECTOR, ".trip-place")
            try:
                # Origin and destination are the first two divs in .trip-place
                origin_dest_divs = trip_place.find_elements(By.CSS_SELECTOR, "div")
                if len(origin_dest_divs) >= 2:
                    origin_data["header_origin"] = origin_dest_divs[0].text.strip()
                    origin_data["header_destination"] = origin_dest_divs[1].text.strip()
            except Exception:
                pass
            header_miles = _text(details_header, ".trip-miles")
            if header_miles is not None:
                origin_data["header_trip_miles"] = header_miles
        except Exception:
            pass

        # Trip information from the dat-route section
        _fill("dat-route", [
            ("origin_city", ".route-origin .city"),
            ("origin_date", ".route-origin .date"),
            ("destination_city", ".route-destination .city"),
            ("trip_miles", ".trip-miles"),
        ])

        # Equipment information (fixed-position data items)
        _fill("dat-equipment", [
            ("load_type", ".equipment-data .data-item:nth-child(1)"),
            ("truck_type", ".equipment-data .data-item:nth-child(2)"),
            ("length", ".equipment-data .data-item:nth-child(3)"),
            ("weight", ".equipment-data .data-item:nth-child(4)"),
            ("commodity", ".equipment-data .data-item:nth-child(5)"),
            ("reference_id", ".equipment-data .data-item:nth-child(6)"),
        ])

        # Rate information
        _fill("dat-rate", [
            ("total_rate", ".data-item-total"),
            ("trip_distance", ".rate-data .data-item"),
            ("rate_per_mile", ".data-item-ratemiles > div"),
        ])

        # Contact information
        _fill("dat-contacts", [
            ("contact_phone", ".contacts__phone"),
        ])

        # Comments
        _fill("dat-notes", [
            ("comments", ".notes-contents"),
        ])

        # Company information (from origin details)
        _fill("dat-company", [
            ("company_name", ".company-name .company-details"),
            ("company_phone", "a[href^='tel:']"),
            ("mc_number", ".city-spacing"),
            ("credit_score", ".credit-spacing span:last-child"),
            ("company_location", ".row-spacing .city-spacing"),
            ("days_to_pay", ".credit-spacing:last-child span:last-child"),
        ])

        # Market rates (spot)
        _fill("dat-search-market-rates", [
            ("spot_rate", ".spot .rate-data"),
            ("spot_rate_per_mile", ".spot .rate-permile"),
            ("spot_range", ".spot .range-data span"),
        ])

    except Exception as e:
        print(f"  ⚠ Error scraping origin details: {e}")
        import traceback
        traceback.print_exc()

    return origin_data

def _write_records_sheet(workbook, title, records):
    """Write a list of flat dicts to a new sheet named `title`.

    The header row is the sorted union of all record keys; missing values
    become ''. Returns the sorted key list (for reporting row/column counts).
    """
    sheet = workbook.create_sheet(title)
    keys = sorted({key for record in records for key in record})

    # Header row
    for col_idx, key in enumerate(keys, 1):
        sheet.cell(row=1, column=col_idx, value=key)

    # Data rows
    for row_idx, record in enumerate(records, 2):
        for col_idx, key in enumerate(keys, 1):
            sheet.cell(row=row_idx, column=col_idx, value=record.get(key, ''))

    return keys

def save_to_excel(origin_data_list, broker_data_list, filename="scraped_data.xlsx"):
    """Save scraped data to Excel file with two sheets: Origin Details and Broker Details

    Args:
        origin_data_list: list of (possibly nested) origin-detail dicts.
        broker_data_list: list of (possibly nested) broker-detail dicts.
        filename: output .xlsx path.

    Returns:
        True on success; False when openpyxl is missing, there is no data,
        or the write failed.
    """
    if not OPENPYXL_AVAILABLE:
        print("✗ openpyxl not available. Install it with: pip install openpyxl")
        return False

    if not origin_data_list and not broker_data_list:
        print("No data to save to Excel")
        return False

    try:
        wb = Workbook()

        # Remove the default sheet so only our named sheets remain
        if "Sheet" in wb.sheetnames:
            wb.remove(wb["Sheet"])

        # Flatten nested dicts/lists into scalar columns for both sheets
        flattened_origin = [flatten_dict_for_csv(d) for d in origin_data_list]
        flattened_broker = [flatten_dict_for_csv(d) for d in broker_data_list]

        if flattened_origin:
            sorted_origin_keys = _write_records_sheet(wb, "Origin Details", flattened_origin)

        if flattened_broker:
            sorted_broker_keys = _write_records_sheet(wb, "Broker Details", flattened_broker)

        # Save the workbook
        wb.save(filename)

        # Bug fix: previously printed a literal "(unknown)" instead of the
        # actual output filename.
        print(f"✓ Data saved to {filename}")
        if flattened_origin:
            print(f"  Origin Details sheet: {len(flattened_origin)} rows, {len(sorted_origin_keys)} columns")
        if flattened_broker:
            print(f"  Broker Details sheet: {len(flattened_broker)} rows, {len(sorted_broker_keys)} columns")

        return True
    except Exception as e:
        print(f"✗ Error saving to Excel: {e}")
        import traceback
        traceback.print_exc()
        return False

def scrape_search_loads(driver):
    """Scrape data from the search-loads page"""
    print(f"\nNavigating to {TARGET_URL}...")
    driver.get(TARGET_URL)
    
    wait = WebDriverWait(driver, 20)
    
    try:
        # Wait for the page to load
        print("Waiting for page to load...")
        time.sleep(5)
        
        # Check for "Login Anyway" modal in case it appears
        handle_login_anyway_modal(driver)
        
        # Wait for search form to appear
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "dat-search-form, form.search-form")))
        
        # Fill in search filters and click search
        if not fill_search_filters(driver):
            print("Failed to fill search filters")
            return None
        
        # Wait a bit more for results to fully load
        time.sleep(3)
        
        # Wait for results table to be present
        try:
            wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "[data-test='results-table-body'], .loads-table, dat-search-table, table"))
            )
            print("Results table found")
        except:
            print("⚠ Results table not found, continuing anyway...")
        
        # Find all table rows - try multiple selectors
        print("\nFinding table rows...")
        table_rows = []
        
        # Try different selectors - the table uses div.row-container, not tr
        selectors_to_try = [
            "div.row-container",
            ".row-container",
            "[id^='table-row-']",
            "dat-search-table-row",
            "tr[data-test='load-row']",
            "tr.table-row",
            "tbody tr",
            "table tr",
            "[data-test='results-table-body'] tr",
            ".loads-table tr"
        ]
        
        for selector in selectors_to_try:
            try:
                table_rows = driver.find_elements(By.CSS_SELECTOR, selector)
                if table_rows:
                    print(f"Found {len(table_rows)} table rows using selector: {selector}")
                    break
            except:
                continue
        
        # If still no rows, try finding company links and get their parent rows
        if not table_rows:
            print("Trying to find rows via company links...")
            try:
                company_links = driver.find_elements(By.CSS_SELECTOR, "a[data-test='load-company-cell']")
                if company_links:
                    print(f"Found {len(company_links)} company links, getting parent rows...")
                    for link in company_links:
                        try:
                            # Find the parent row-container (not tr, it's a div)
                            parent_row = link.find_element(By.XPATH, "./ancestor::div[contains(@class, 'row-container')][1]")
                            if parent_row and parent_row not in table_rows:
                                table_rows.append(parent_row)
                        except:
                            try:
                                # Alternative: find any ancestor div that might be a row
                                parent_row = link.find_element(By.XPATH, "./ancestor::div[@id[starts-with(., 'table-row-')]][1]")
                                if parent_row and parent_row not in table_rows:
                                    table_rows.append(parent_row)
                            except:
                                continue
                    if table_rows:
                        print(f"Found {len(table_rows)} rows via company links")
            except Exception as e:
                print(f"Error finding rows via company links: {e}")
                pass
        
        if not table_rows:
            print("No table rows found. Saving page source for inspection...")
            with open("page_source.html", "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            print("Page source saved to page_source.html")
            return None
        
        print(f"Total table rows found: {len(table_rows)}")
        
        # Lists to store origin and broker data
        origin_details_data = []
        broker_details_data = []
        original_window = driver.current_window_handle
        total_rows = len(table_rows)
        
        # Process all entries one by one
        max_entries = total_rows  # Process all rows
        print(f"\n{'='*60}")
        print(f"Processing all {max_entries} listing(s) one by one...")
        print(f"{'='*60}\n")
        
        # Store the working selector for reuse
        working_selector = None
        for selector in selectors_to_try:
            try:
                test_rows = driver.find_elements(By.CSS_SELECTOR, selector)
                if test_rows:
                    working_selector = selector
                    break
            except:
                continue
        
        for i in range(min(max_entries, total_rows)):
            try:
                # Re-find table rows each time (page may reload)
                if working_selector:
                    table_rows = driver.find_elements(By.CSS_SELECTOR, working_selector)
                else:
                    # Fallback: try all selectors again
                    table_rows = []
                    for selector in selectors_to_try:
                        try:
                            found_rows = driver.find_elements(By.CSS_SELECTOR, selector)
                            if found_rows:
                                table_rows = found_rows
                                break
                        except:
                            continue
                    
                    # If still no rows, try via company links
                    if not table_rows:
                        try:
                            company_links = driver.find_elements(By.CSS_SELECTOR, "a[data-test='load-company-cell']")
                            if company_links:
                                for link in company_links:
                                    try:
                                        # Find parent row-container (div, not tr)
                                        parent_row = link.find_element(By.XPATH, "./ancestor::div[contains(@class, 'row-container')][1]")
                                        if parent_row and parent_row not in table_rows:
                                            table_rows.append(parent_row)
                                    except:
                                        try:
                                            # Alternative: find by ID
                                            parent_row = link.find_element(By.XPATH, "./ancestor::div[@id[starts-with(., 'table-row-')]][1]")
                                            if parent_row and parent_row not in table_rows:
                                                table_rows.append(parent_row)
                                        except:
                                            continue
                        except:
                            pass
                
                if i >= len(table_rows):
                    print(f"Only {len(table_rows)} rows found, stopping at index {i}")
                    break
                
                row = table_rows[i]
                
                print(f"\n--- Processing row {i+1}/{total_rows} ---")
                
                # Scroll to the row to ensure it's visible
                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", row)
                time.sleep(1)
                
                # Step 1: Find and click on the origin element to expand the row
                print("Step 1: Clicking on origin to expand row...")
                origin_clicked = False
                
                # Try to find the origin element with class "origin" containing the city name
                try:
                    # First try: find div with class "origin" that contains span with "truncate extended-trip-point"
                    # The origin is in the route-dh-container-lg section
                    origin_element = row.find_element(By.CSS_SELECTOR, "div.origin, .route-dh-container-lg .origin, .origin span.truncate.extended-trip-point")
                    if origin_element.is_displayed():
                        # Scroll to origin element
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", origin_element)
                        time.sleep(0.5)
                        # Click the origin
                        try:
                            origin_element.click()
                        except:
                            # Try clicking the span inside
                            try:
                                origin_span = origin_element.find_element(By.CSS_SELECTOR, "span.truncate.extended-trip-point, span")
                                origin_span.click()
                            except:
                                # Fallback to JavaScript click
                                driver.execute_script("arguments[0].click();", origin_element)
                        print("✓ Clicked origin element")
                        origin_clicked = True
                        time.sleep(2)  # Wait for expansion
                except:
                    # Try alternative selectors
                    origin_selectors = [
                        ".route-dh-container-lg .origin",
                        "div.origin span.truncate.extended-trip-point",
                        "div.origin",
                        ".origin",
                        "[data-test='load-origin-cell'] .origin",
                        "[data-test='load-origin-cell']",
                        ".route-container .origin"
                    ]
                    
                    for selector in origin_selectors:
                        try:
                            origin_element = row.find_element(By.CSS_SELECTOR, selector)
                            if origin_element.is_displayed():
                                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", origin_element)
                                time.sleep(0.5)
                                try:
                                    origin_element.click()
                                except:
                                    driver.execute_script("arguments[0].click();", origin_element)
                                print(f"✓ Clicked origin using selector: {selector}")
                                origin_clicked = True
                                time.sleep(2)
                                break
                        except:
                            continue
                
                # If still not found, try clicking the route cell or row.
                # Fallback chain: route/origin cell first, then the row itself
                # via JavaScript (which bypasses overlay/interception issues).
                if not origin_clicked:
                    try:
                        # Try clicking the route/origin cell
                        route_cell = row.find_element(By.CSS_SELECTOR, ".cell-route, dat-route, [data-test='load-origin-cell']")
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", route_cell)
                        time.sleep(0.5)
                        route_cell.click()
                        print("✓ Clicked route cell (fallback)")
                        origin_clicked = True
                        time.sleep(2)
                    except Exception:  # was bare except: don't swallow KeyboardInterrupt/SystemExit
                        # Final fallback: click the row itself
                        try:
                            print("⚠ Origin element not found, clicking row directly...")
                            driver.execute_script("arguments[0].click();", row)
                            time.sleep(2)
                        except Exception as e:
                            print(f"⚠ Could not click row: {e}")
                
                # After a successful origin click, give the expanded detail
                # section up to 5 seconds to attach to the DOM.
                if origin_clicked:
                    detail_locator = (By.CSS_SELECTOR, "dat-load-details, .expanded-detail-row")
                    try:
                        WebDriverWait(driver, 5).until(EC.presence_of_element_located(detail_locator))
                    except TimeoutException:
                        print("⚠ Detail section may not have expanded, continuing anyway...")
                    else:
                        print("✓ Detail section expanded")
                
                # Step 2: pull the origin-side fields out of the expanded row.
                print("Step 2: Scraping origin details from expanded row...")
                # Brief pause so the expansion has settled before reading it.
                time.sleep(1)
                try:
                    origin_data = scrape_origin_details(driver, row)
                    origin_data["row_index"] = i + 1
                    print(f"✓ Origin details scraped: {len(origin_data)} fields")
                except Exception as e:
                    # Record a stub entry so downstream rows stay aligned.
                    print(f"⚠ Error scraping origin details: {e}")
                    origin_data = {"row_index": i + 1}
                origin_details_data.append(origin_data)
                
                # Step 3: Find and click the company link to go to detail page
                print("Step 3: Finding and clicking company link to go to detail page...")
                company_link_found = False
                try:
                    # Try to find company link in the row (might be in expanded section or in row itself)
                    company_link = None
                    
                    # First try in the row itself (with timeout)
                    try:
                        company_link = WebDriverWait(driver, 3).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, "a[data-test='load-company-cell']"))
                        )
                        # Make sure it's in the current row
                        if company_link not in row.find_elements(By.CSS_SELECTOR, "a[data-test='load-company-cell']"):
                            company_link = None
                    except (TimeoutException, NoSuchElementException):
                        pass
                    
                    # Try in expanded detail section
                    if not company_link:
                        try:
                            detail_section = row.find_element(By.CSS_SELECTOR, "dat-load-details")
                            company_link = detail_section.find_element(By.CSS_SELECTOR, "a[data-test='load-company-cell'], .directory, a[href*='directory'], a[href*='offices']")
                        except:
                            pass
                    
                    # Try finding by text content
                    if not company_link:
                        try:
                            all_links = driver.find_elements(By.CSS_SELECTOR, "a[data-test='load-company-cell']")
                            if all_links:
                                # Get the link that's closest to our row
                                for link in all_links:
                                    try:
                                        # Check if link is near our row
                                        link_row = link.find_element(By.XPATH, "./ancestor::tr[1]")
                                        if link_row == row:
                                            company_link = link
                                            break
                                    except:
                                        # If we can't find parent, use first one
                                        if not company_link:
                                            company_link = link
                            else:
                                # Try any link with company-related text
                                all_links = row.find_elements(By.CSS_SELECTOR, "a")
                                for link in all_links:
                                    link_text = link.text.strip().lower()
                                    if 'company' in link_text or 'directory' in link_text or 'view' in link_text:
                                        company_link = link
                                        break
                        except:
                            pass
                    
                    if company_link:
                        company_name = company_link.text.strip() if company_link.text.strip() else "Unknown"
                        print(f"Company: {company_name}")
                        
                        # Scroll to the link
                        driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", company_link)
                        time.sleep(0.5)
                        
                        # Get the href before clicking (for verification)
                        company_href = company_link.get_attribute("href") or ""
                        
                        # Click the company link
                        try:
                            company_link.click()
                        except:
                            # Fallback to JavaScript click
                            driver.execute_script("arguments[0].click();", company_link)
                        print(f"✓ Company link clicked - navigating to detail page (href: {company_href[:80]}...)")
                        company_link_found = True
                        
                        # Wait for navigation to start
                        time.sleep(1)
                    else:
                        print("⚠ Could not find company link, trying to find it in page...")
                        # Last resort: find any company link on the page
                        try:
                            all_company_links = driver.find_elements(By.CSS_SELECTOR, "a[data-test='load-company-cell']")
                            if all_company_links and i < len(all_company_links):
                                company_link = all_company_links[i]
                                company_name = company_link.text.strip() if company_link.text.strip() else "Unknown"
                                print(f"Found company link by index: {company_name}")
                                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", company_link)
                                time.sleep(0.5)
                                company_link.click()
                                company_link_found = True
                        except:
                            pass
                        
                        if not company_link_found:
                            print("⚠ Could not find company link, skipping to next row")
                            continue
                except Exception as e:
                    print(f"⚠ Error finding/clicking company link: {e}")
                    import traceback
                    traceback.print_exc()
                    continue
                
                # Step 4: Wait for detail page to load - CRITICAL: Must wait for navigation to complete
                print("Step 4: Waiting for detail page to load...")

                # Remember the pre-navigation URL so the URL-change wait below
                # can detect that navigation actually happened.
                original_url = driver.current_url

                # Check if a new window/tab opened.
                # BUGFIX: the old `WebDriverWait(driver, 5).until(lambda d: d.window_handles)`
                # never waited — window_handles always contains at least one handle,
                # so the condition was immediately truthy and the TimeoutException
                # branch was dead code. A plain snapshot has the same effective
                # behavior without the misleading wait.
                windows = driver.window_handles
                if len(windows) > 1:
                    # Switch to the first handle that isn't the original search
                    # window (original_window is captured before this loop —
                    # TODO confirm against the code above this chunk).
                    for window in windows:
                        if window != original_window:
                            driver.switch_to.window(window)
                            print("✓ Switched to new window")
                            break
                else:
                    print("✓ Detail page opened in same window")
                
                # CRITICAL: Wait for URL to change (navigation has started).
                # Requires both a different URL and an /offices/ or /details/
                # path segment, so a mere hash/query change does not count.
                print("  Waiting for URL to change (navigation in progress)...")
                try:
                    WebDriverWait(driver, 15).until(
                        lambda d: d.current_url != original_url and ("/offices/" in d.current_url or "/details/" in d.current_url)
                    )
                    new_url = driver.current_url
                    print(f"  ✓ URL changed to: {new_url[:100]}...")
                except TimeoutException:
                    print(f"  ⚠ URL did not change! Still on: {driver.current_url}")
                    print("  This might mean navigation failed. Saving page source for debugging...")
                    try:
                        with open("navigation_failed.html", "w", encoding="utf-8") as f:
                            f.write(driver.page_source)
                        print("  Saved page source to navigation_failed.html")
                    except Exception:  # was bare except: best-effort debug dump
                        pass
                
                # Wait for the document to reach readyState "complete" (static
                # resources loaded; Angular may still be rendering after this).
                print("  Waiting for page to be ready...")
                try:
                    WebDriverWait(driver, 15).until(
                        lambda d: d.execute_script("return document.readyState") == "complete"
                    )
                    print("  ✓ Page ready state: complete")
                except Exception:  # was bare except: don't swallow KeyboardInterrupt/SystemExit
                    print("  ⚠ Page ready state check timeout")
                
                # Wait for detail page elements to appear; fall back to a second
                # selector set, and dump the page source if neither shows up.
                print("  Waiting for detail page elements to appear...")
                try:
                    WebDriverWait(driver, 15).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "dat-company-profile, dat-company-details, .company-details, dat-company-details-container"))
                    )
                    print("  ✓ Detail page container found")
                except TimeoutException:
                    print("  ⚠ Detail page container not found, trying alternative selectors...")
                    try:
                        WebDriverWait(driver, 5).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-basic-information, dat-company-header-tabs"))
                        )
                        print("  ✓ Found detail page elements (alternative)")
                    except Exception:  # was bare except
                        print("  ⚠ Detail page elements not found!")
                        # Save page source for debugging
                        try:
                            with open("detail_page_not_loaded.html", "w", encoding="utf-8") as f:
                                f.write(driver.page_source)
                            print("  Saved page source to detail_page_not_loaded.html")
                        except Exception:  # was bare except: best-effort debug dump
                            pass
                
                # Additional wait for Angular to render
                print("  Waiting for Angular content to render...")
                time.sleep(3)
                
                # Final verification: Check we're actually on a detail page
                current_url = driver.current_url
                if "/offices/" in current_url or "/details/" in current_url:
                    print(f"  ✓ Confirmed: On detail page ({current_url[:100]}...)")
                else:
                    print(f"  ⚠ WARNING: May not be on detail page! URL: {current_url}")
                    print("  This could cause scraping to fail!")
                    # If we're still on search page, skip to next row
                    if "/search-loads" in current_url or "search" in current_url.lower():
                        print("  ✗ ERROR: Still on search page! Skipping this row...")
                        try:
                            with open(f"still_on_search_page_row_{i+1}.html", "w", encoding="utf-8") as f:
                                f.write(driver.page_source)
                            print(f"  Saved page source to still_on_search_page_row_{i+1}.html")
                        except:
                            pass
                        continue  # Skip to next row
                
                # Presence alone is not enough: require the detail container to
                # actually be visible before scraping starts.
                print("  Waiting for detail page content to be visible...")
                content_locator = (
                    By.CSS_SELECTOR,
                    "dat-company-profile, dat-basic-information, dat-company-details",
                )
                try:
                    WebDriverWait(driver, 10).until(EC.visibility_of_element_located(content_locator))
                except TimeoutException:
                    print("  ⚠ Detail page content may not be visible yet")
                else:
                    print("  ✓ Detail page content is visible")
                
                # Step 4.5: ALWAYS select Broker tab if available (MUST happen before scraping).
                # Flow: locate the mat-tab links, pick the one whose text/href
                # identifies it as the Broker tab, click it (normal click ->
                # JS click -> direct href navigation), then run four independent
                # verification probes. Only defect fixed here: the bare excepts
                # were narrowed to `except Exception:`.
                print("Step 4.5: Checking for tabs and ALWAYS selecting Broker tab if available...")
                broker_tab_selected = False
                try:
                    # Wait for tabs to appear
                    time.sleep(2)
                    
                    # Find all tab links - try multiple selectors to be sure we find them
                    tab_links = []
                    selectors_to_try = [
                        "dat-company-header-tabs a.mat-tab-link",
                        "nav.mat-tab-nav-bar a.mat-tab-link",
                        "a.mat-tab-link",
                        ".mat-tab-link",
                        "dat-company-header-tabs .mat-tab-links a"
                    ]
                    
                    for selector in selectors_to_try:
                        try:
                            found_tabs = driver.find_elements(By.CSS_SELECTOR, selector)
                            if found_tabs:
                                tab_links = found_tabs
                                print(f"  Found {len(tab_links)} tab(s) using selector: {selector}")
                                # Print all tabs for debugging
                                for idx, tab in enumerate(tab_links):
                                    tab_text = tab.text.strip()
                                    tab_href = tab.get_attribute("href") or ""
                                    is_active = "mat-tab-label-active" in (tab.get_attribute("class") or "")
                                    print(f"    Tab {idx+1}: '{tab_text}' (active: {is_active}, href: {tab_href[:80]}...)")
                                break
                        except Exception:  # was bare except: probe failed, try next selector
                            continue
                    
                    if tab_links:
                        broker_tab = None
                        broker_tab_href = None
                        
                        # Look for Broker tab - check both text and href more thoroughly
                        for tab in tab_links:
                            tab_text = tab.text.strip().lower()
                            tab_href = tab.get_attribute("href") or ""
                            
                            # Check if this is the broker tab - be more flexible
                            is_broker = (
                                "broker" in tab_text or 
                                "broker" in tab_href.lower() or
                                "/broker-mc-" in tab_href.lower() or
                                "broker-mc-" in tab_href.lower() or
                                "details/broker" in tab_href.lower()
                            )
                            
                            if is_broker:
                                broker_tab = tab
                                broker_tab_href = tab_href
                                print(f"  ✓ Found Broker tab: '{tab.text.strip()}' (href: {tab_href[:80]}...)")
                                break
                        
                        # If broker tab found, ALWAYS click it (even if it appears active)
                        if broker_tab:
                            try:
                                # Check if it's already active
                                is_active = "mat-tab-label-active" in (broker_tab.get_attribute("class") or "")
                                if is_active:
                                    print("  Broker tab appears active, but clicking to ensure content is loaded...")
                                
                                # Scroll to broker tab
                                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", broker_tab)
                                time.sleep(0.5)
                                
                                # Try multiple methods to click/navigate to broker tab
                                clicked = False
                                
                                # Method 1: Normal click
                                try:
                                    broker_tab.click()
                                    clicked = True
                                    print("  ✓ Clicked Broker tab (normal click)")
                                except Exception as e1:
                                    print(f"  Normal click failed: {e1}")
                                    
                                    # Method 2: JavaScript click
                                    try:
                                        driver.execute_script("arguments[0].click();", broker_tab)
                                        clicked = True
                                        print("  ✓ Clicked Broker tab (JavaScript click)")
                                    except Exception as e2:
                                        print(f"  JavaScript click failed: {e2}")
                                        
                                        # Method 3: Navigate directly via href
                                        if broker_tab_href:
                                            try:
                                                print(f"  Attempting direct navigation to: {broker_tab_href}")
                                                driver.get(broker_tab_href)
                                                clicked = True
                                                print("  ✓ Navigated to Broker tab via URL")
                                            except Exception as e3:
                                                print(f"  Direct navigation failed: {e3}")
                                
                                if clicked:
                                    broker_tab_selected = True
                                    
                                    # Wait for broker content to load
                                    time.sleep(3)  # Wait for navigation/content update
                                    
                                    try:
                                        # Wait for the page to update
                                        WebDriverWait(driver, 10).until(
                                            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-company-profile, dat-basic-information"))
                                        )
                                        
                                        # Also wait for basic information section specifically
                                        WebDriverWait(driver, 5).until(
                                            EC.presence_of_element_located((By.CSS_SELECTOR, "dat-basic-information"))
                                        )
                                        print("  ✓ Broker tab content loaded and verified")
                                        
                                        # Verify we're on broker page: any one of
                                        # the four independent checks below passing
                                        # is accepted as confirmation.
                                        verification_passed = False
                                        
                                        # Check 1: URL contains broker
                                        try:
                                            current_url = driver.current_url
                                            if "broker" in current_url.lower():
                                                print(f"  ✓ Confirmed: URL contains 'broker' ({current_url[:80]}...)")
                                                verification_passed = True
                                        except Exception:  # was bare except
                                            pass
                                        
                                        # Check 2: Broker-specific fields exist
                                        try:
                                            basic_section = driver.find_element(By.CSS_SELECTOR, "dat-basic-information")
                                            if basic_section.find_elements(By.CSS_SELECTOR, ".fields-grid__cell--creditScore, .fields-grid__cell--daysToPay"):
                                                print("  ✓ Confirmed: Broker-specific fields (creditScore/daysToPay) detected")
                                                verification_passed = True
                                        except Exception:  # was bare except
                                            pass
                                        
                                        # Check 3: Entity type is BROKER
                                        try:
                                            general_section = driver.find_element(By.CSS_SELECTOR, "dat-dot-profile")
                                            entity_type = general_section.find_element(By.CSS_SELECTOR, ".fields-grid__cell--entityType.fields-grid__cell--value")
                                            entity_text = entity_type.text.strip().upper()
                                            if "BROKER" in entity_text:
                                                print(f"  ✓ Confirmed: Entity type is {entity_type.text.strip()}")
                                                verification_passed = True
                                        except Exception:  # was bare except
                                            pass
                                        
                                        # Check 4: Active tab is broker tab
                                        try:
                                            active_tab = driver.find_element(By.CSS_SELECTOR, "a.mat-tab-link.mat-tab-label-active")
                                            active_tab_text = active_tab.text.strip().lower()
                                            if "broker" in active_tab_text:
                                                print(f"  ✓ Confirmed: Active tab is '{active_tab.text.strip()}'")
                                                verification_passed = True
                                        except Exception:  # was bare except
                                            pass
                                        
                                        if not verification_passed:
                                            print("  ⚠ WARNING: Could not verify broker tab is active!")
                                            
                                    except TimeoutException:
                                        print("  ⚠ Broker tab content may not have loaded completely, continuing anyway...")
                                        # Save page source for debugging
                                        try:
                                            with open("broker_tab_timeout.html", "w", encoding="utf-8") as f:
                                                f.write(driver.page_source)
                                            print("  Saved timeout page source to broker_tab_timeout.html")
                                        except Exception:  # was bare except: best-effort debug dump
                                            pass
                                else:
                                    print("  ⚠ ERROR: Could not click/navigate to Broker tab!")
                            except Exception as e:
                                print(f"  ⚠ Error clicking Broker tab: {e}")
                                import traceback
                                traceback.print_exc()
                        else:
                            print("  ⚠ No Broker tab found in tabs list")
                            # List all tabs for debugging
                            print(f"  Available tabs: {[tab.text.strip() for tab in tab_links]}")
                    else:
                        print("  ⚠ No tabs found on page - this might be a single-tab page")
                
                except Exception as e:
                    print(f"  ⚠ Error checking for tabs: {e}")
                    import traceback
                    traceback.print_exc()
                
                if not broker_tab_selected:
                    print("  ⚠ WARNING: Broker tab was NOT selected! This may cause incorrect data extraction.")
                    # Save page source for debugging
                    try:
                        with open("no_broker_tab_selected.html", "w", encoding="utf-8") as f:
                            f.write(driver.page_source)
                        print("  Saved page source to no_broker_tab_selected.html for debugging")
                    except Exception:  # was bare except: best-effort debug dump
                        pass
                else:
                    print("  ✓ Broker tab selection completed successfully")
                
                # Step 5: Scrape detailed data from the detail page (broker details)
                # IMPORTANT: This happens AFTER broker tab selection
                print("\nStep 5: Scraping broker details from detail page...")
                
                # CRITICAL CHECK: Verify we're actually on a detail page before proceeding
                current_url = driver.current_url
                print(f"  Current URL: {current_url}")
                
                # Check if we're still on search page (navigation failed)
                if "/search-loads" in current_url or "search" in current_url.lower():
                    print("  ✗ ERROR: Still on search page! Navigation to detail page failed!")
                    print("  Skipping scraping for this row...")
                    # Save page source for debugging
                    try:
                        with open(f"navigation_failed_row_{i+1}.html", "w", encoding="utf-8") as f:
                            f.write(driver.page_source)
                        print(f"  Saved page source to navigation_failed_row_{i+1}.html")
                    except:
                        pass
                    continue  # Skip to next row
                
                # Check if we're on a detail page
                if "/offices/" not in current_url and "/details/" not in current_url:
                    print("  ⚠ WARNING: URL doesn't look like a detail page!")
                    print("  Proceeding anyway, but scraping may fail...")
                
                # Final verification that broker tab is active before scraping
                try:
                    active_tab = driver.find_element(By.CSS_SELECTOR, "a.mat-tab-link.mat-tab-label-active")
                    active_tab_text = active_tab.text.strip().lower()
                    if "broker" not in active_tab_text:
                        print(f"  ⚠ WARNING: Active tab is '{active_tab.text.strip()}', not Broker tab!")
                        print("  Attempting to select Broker tab again...")
                        # Try one more time to click broker tab
                        try:
                            broker_tabs = driver.find_elements(By.CSS_SELECTOR, "a.mat-tab-link")
                            for tab in broker_tabs:
                                if "broker" in tab.text.strip().lower() or "broker" in (tab.get_attribute("href") or "").lower():
                                    driver.execute_script("arguments[0].click();", tab)
                                    time.sleep(3)  # Wait for content to load
                                    print("  ✓ Clicked Broker tab again")
                                    break
                        except:
                            pass
                    else:
                        print(f"  ✓ Confirmed: Active tab is '{active_tab.text.strip()}' (Broker tab)")
                except:
                    print("  Could not verify active tab (may not have tabs on this page)")
                
                try:
                    # Wait a bit more to ensure broker tab content is fully loaded
                    time.sleep(2)
                    
                    # Verify we're on the broker page by checking URL
                    current_url = driver.current_url
                    if "broker" not in current_url.lower():
                        print(f"  ⚠ WARNING: URL does not contain 'broker'! May not be on broker page.")
                    else:
                        print(f"  ✓ Confirmed: URL contains 'broker'")
                    
                    broker_data = scrape_detail_page(driver)
                    
                    # Check if we got any data
                    if not broker_data or len(broker_data) <= 3:  # Only has url, title, timestamp
                        print("  ⚠ Warning: No broker data extracted, page might not have loaded correctly")
                        # Save page source for debugging
                        with open(f"broker_page_debug_{i+1}.html", "w", encoding="utf-8") as f:
                            f.write(driver.page_source)
                        print(f"  Saved page source to broker_page_debug_{i+1}.html")
                    else:
                        print(f"  ✓ Extracted {len(broker_data)} broker data fields")
                except Exception as e:
                    # On any failure, record an error stub so metadata can still
                    # be attached and the run continues with the next row.
                    print(f"⚠ Error scraping broker details: {e}")
                    import traceback
                    traceback.print_exc()
                    broker_data = {"row_index": i + 1, "error": str(e)}
                    # Save page source for debugging
                    try:
                        with open(f"broker_error_{i+1}.html", "w", encoding="utf-8") as f:
                            f.write(driver.page_source)
                        print(f"  Saved error page source to broker_error_{i+1}.html")
                    except Exception:  # was bare except: best-effort debug dump
                        pass
                
                # Add metadata
                broker_data["index"] = i + 1
                broker_data["company_name_from_link"] = company_name
                broker_data["row_index"] = i + 1
                
                broker_details_data.append(broker_data)
                print(f"✓ Broker details scraped: {driver.current_url}")
                print(f"  Extracted {len(broker_data)} data fields")
                
                # Navigate back to search page for next iteration
                print(f"\n--- Completed row {i+1}/{total_rows} ---")
                print("Navigating back to search page...")
                
                # Close the detail page if it opened in a new window
                if len(driver.window_handles) > 1:
                    driver.close()
                    driver.switch_to.window(original_window)
                    print("✓ Closed detail window and switched back to search page")
                    time.sleep(2)
                else:
                    # Navigate back if same window
                    try:
                        driver.back()
                        print("✓ Navigated back to search page")
                        time.sleep(3)
                        
                        # Wait for search results to reload
                        print("  Waiting for search results to reload...")
                        try:
                            WebDriverWait(driver, 15).until(
                                EC.presence_of_element_located((By.CSS_SELECTOR, "dat-search-table-row, div.row-container, tr[data-test='load-row'], table, .loads-table"))
                            )
                            print("  ✓ Search results page reloaded")
                            
                            # Additional wait for table rows to be visible
                            time.sleep(2)
                            
                            # Verify we're back on search page
                            current_url = driver.current_url
                            if "/search-loads" in current_url or "search" in current_url.lower():
                                print(f"  ✓ Confirmed: Back on search page ({current_url[:80]}...)")
                            else:
                                print(f"  ⚠ WARNING: May not be on search page! URL: {current_url}")
                                
                        except TimeoutException:
                            print("  ⚠ Search results may not have reloaded completely")
                            # Try to navigate to search page directly
                            try:
                                driver.get(TARGET_URL)
                                time.sleep(3)
                                print("  ✓ Navigated directly to search page")
                            except:
                                pass
                    except Exception as e:
                        print(f"  ⚠ Error navigating back: {e}")
                        # Try to navigate to search page directly
                        try:
                            driver.get(TARGET_URL)
                            time.sleep(3)
                            print("  ✓ Navigated directly to search page (fallback)")
                        except:
                            print("  ✗ ERROR: Could not navigate back to search page!")
                            break  # Stop processing if we can't get back
                
                # Small delay before processing next row
                time.sleep(1)
                
            except Exception as e:
                print(f"Error clicking company link {i+1}: {e}")
                import traceback
                traceback.print_exc()
                # Try to get back to the search results page
                try:
                    if len(driver.window_handles) > 1:
                        driver.close()
                        driver.switch_to.window(original_window)
                    else:
                        # Navigate back to search page
                        driver.get(TARGET_URL)
                        time.sleep(3)
                        fill_search_filters(driver)
                except Exception as recovery_error:
                    print(f"Error recovering: {recovery_error}")
                continue
        
        # Save scraped data to Excel with two sheets
        # Generate filename with current date

        current_datetime = datetime.now()
        date_time_str = current_datetime.strftime("%Y-%m-%d_%H-%M-%S")
        excel_filename = f"scraped_data_{date_time_str}.xlsx"
        
        if origin_details_data or broker_details_data:
            print(f"\n✓ Scraped {len(origin_details_data)} origin detail(s) and {len(broker_details_data)} broker detail(s)")
            save_to_excel(origin_details_data, broker_details_data, excel_filename)
            print(f"✓ Data saved to: {excel_filename}")
        else:
            print("\n⚠ No data was scraped")
        
        return {"origin_details": origin_details_data, "broker_details": broker_details_data}
        
    except Exception as e:
        print(f"Error during scraping: {e}")
        import traceback
        traceback.print_exc()
        # Save page source for debugging
        with open("error_page_source.html", "w", encoding="utf-8") as f:
            f.write(driver.page_source)
        print("Error page source saved to error_page_source.html")
        return None

def main(headless=False):
    """Main function to orchestrate the scraping process.

    Pipeline: set up the Chrome driver, log in, scrape the search-loads
    page, then always close the browser in ``finally``.

    Args:
        headless: If True, run browser in headless mode (for server/cron use)
    """
    driver = None
    try:
        print("=" * 50)
        print("DAT Website Scraper")
        print(f"Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("=" * 50)

        # Setup driver
        print("\n1. Setting up Chrome driver...")
        driver = setup_driver(headless=headless)

        # Login (abort early if it fails — scraping is pointless without a session)
        print("\n2. Logging in...")
        if not login(driver):
            print("Login failed. Exiting...")
            return

        # Scrape search loads page
        print("\n3. Scraping search loads page...")
        data = scrape_search_loads(driver)

        if data:
            print("\n✓ Scraping completed successfully!")
        else:
            print("\n✗ Scraping encountered issues. Check error files.")

        # Keep browser open for a few seconds to inspect (only if not headless)
        if not headless:
            print("\nKeeping browser open for 10 seconds for inspection...")
            time.sleep(10)

    except Exception as e:
        print(f"\nFatal error: {e}")
        import traceback
        traceback.print_exc()

    finally:
        # Always release the browser process, even after a fatal error,
        # so headless/cron runs don't leak Chrome instances.
        if driver:
            print("\nClosing browser...")
            driver.quit()
            print("Done!")

if __name__ == "__main__":
    import sys

    # Headless mode can be requested either via the --headless CLI flag or
    # the HEADLESS environment variable (handy for cron/server runs with no
    # display attached).
    run_headless = "--headless" in sys.argv
    if not run_headless:
        run_headless = os.getenv("HEADLESS", "").lower() == "true"
    main(headless=run_headless)