#!/usr/bin/env python3
"""
淘宝爬虫 - 调试模式
输出详细日志，保存页面截图和HTML
"""

import json
import time
import os
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup

# Create the debug output directory
DEBUG_DIR = "debug_output"
os.makedirs(DEBUG_DIR, exist_ok=True)

def log(message, level="INFO"):
    """打印带时间戳的日志"""
    timestamp = datetime.now().strftime('%H:%M:%S')
    print(f"[{timestamp}] [{level}] {message}")

def setup_driver_debug():
    """配置Chrome驱动 - 调试模式"""
    log("开始配置Chrome驱动...")

    chrome_options = Options()
    chrome_options.add_argument('--headless')  # headless mode
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--window-size=1920,1080')
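    # Note: the user agent below is a mobile (Android Chrome) UA, so Taobao may serve
    # its mobile layout even though the window size above is desktop-sized.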
    chrome_options.add_argument('user-agent=Mozilla/5.0 (Linux; Android 10; SM-G973F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36')

    # Disable image loading to speed things up (optional)
    # prefs = {"profile.managed_default_content_settings.images": 2}
    # chrome_options.add_experimental_option("prefs", prefs)

    try:
        driver = webdriver.Chrome(options=chrome_options)
        log("✓ Chrome驱动初始化成功", "SUCCESS")
        return driver
    except Exception as e:
        log(f"✗ Chrome驱动初始化失败: {e}", "ERROR")
        log("尝试检查chromium安装...", "INFO")
        return None

def save_page_source(driver, filename):
    """保存页面HTML源码"""
    filepath = os.path.join(DEBUG_DIR, filename)
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(driver.page_source)
    log(f"✓ 已保存页面HTML: {filepath}")

def save_screenshot(driver, filename):
    """保存页面截图"""
    try:
        filepath = os.path.join(DEBUG_DIR, filename)
        driver.save_screenshot(filepath)
        log(f"✓ 已保存截图: {filepath}")
    except Exception as e:
        log(f"✗ 截图失败: {e}", "ERROR")

def analyze_page_structure(html):
    """分析页面结构"""
    log("开始分析页面结构...")
    soup = BeautifulSoup(html, 'html.parser')

    # Count common tags
    stats = {
        'div': len(soup.find_all('div')),
        'a': len(soup.find_all('a')),
        'span': len(soup.find_all('span')),
        'img': len(soup.find_all('img')),
    }

    log(f"  页面标签统计: {stats}")

    # Look for CSS class names that might relate to shop listings
    all_classes = set()
    for tag in soup.find_all(class_=True):
        if isinstance(tag.get('class'), list):
            all_classes.update(tag.get('class'))

    shop_related = [c for c in all_classes if any(keyword in c.lower() for keyword in ['shop', 'item', 'card', 'list'])]
    log(f"  找到可能的相关CSS类 ({len(shop_related)}个):")
    for cls in sorted(shop_related)[:20]:
        log(f"    - {cls}")

    return shop_related

def scrape_with_debug(url):
    """调试模式爬取"""
    log("="*60)
    log("开始调试模式爬取")
    log("="*60)

    driver = setup_driver_debug()
    if not driver:
        log("驱动初始化失败，退出", "ERROR")
        return None

    all_data = []

    try:
        # Visit the page
        log(f"Visiting URL: {url[:100]}...")
        start_time = time.time()
        driver.get(url)
        load_time = time.time() - start_time
        log(f"✓ 页面加载完成 (耗时: {load_time:.2f}秒)")

        # Wait for the initial content to load
        log("Waiting 5 seconds for the page to finish loading...")
        time.sleep(5)
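        # A possible refinement (untested sketch, not part of the original flow): wait
        # explicitly for a known element instead of a fixed sleep. The locator below is
        # an assumption; adjust it to whatever container the page actually renders.
        #   from selenium.webdriver.common.by import By
        #   from selenium.webdriver.support.ui import WebDriverWait
        #   from selenium.webdriver.support import expected_conditions as EC
        #   WebDriverWait(driver, 15).until(
        #       EC.presence_of_element_located((By.TAG_NAME, "body"))
        #   )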

        # Save the initial state
        save_page_source(driver, "01_initial_load.html")
        save_screenshot(driver, "01_initial_load.png")

        # Analyze the page structure
        shop_classes = analyze_page_structure(driver.page_source)

        # Scroll the page
        log("Scrolling the page to load more content...")
        for i in range(3):
            log(f"  Scroll {i+1}/3...")
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
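        # Optional early-stop sketch (an assumption, not part of the original flow): a
        # fixed number of scrolls may under- or over-shoot; a common pattern is to keep
        # scrolling until document.body.scrollHeight stops growing, e.g.:
        #   last_height = driver.execute_script("return document.body.scrollHeight")
        #   for _ in range(10):
        #       driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        #       time.sleep(2)
        #       new_height = driver.execute_script("return document.body.scrollHeight")
        #       if new_height == last_height:
        #           break
        #       last_height = new_height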

        save_page_source(driver, "02_after_scroll.html")
        save_screenshot(driver, "02_after_scroll.png")

        # Try to extract data
        log("Extracting data...")
        soup = BeautifulSoup(driver.page_source, 'html.parser')

        # Try several selector strategies
        selectors = [
            ('div', lambda x: x and 'shop' in x.lower()),
            ('div', lambda x: x and 'item' in x.lower()),
            ('a', lambda x: x and 'shop' in x.lower()),
            ('div', lambda x: x and 'card' in x.lower()),
        ]
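        # Note: class_ here takes a callable filter; BeautifulSoup passes it the class
        # value (None when a tag has no class attribute), which is why each lambda
        # guards with `x and`.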

        for tag, class_filter in selectors:
            elements = soup.find_all(tag, class_=class_filter)
            if elements:
                log(f"  策略 [{tag} + class过滤]: 找到 {len(elements)} 个元素")

                for idx, elem in enumerate(elements[:10], 1):
                    item = {
                        'index': idx,
                        'tag': elem.name,
                        'classes': ' '.join(elem.get('class', [])),
                        'text': elem.get_text(strip=True)[:100],
                        'html': str(elem)[:300]
                    }

                    # Try to extract a link
                    link = elem.find('a') if elem.name != 'a' else elem
                    if link and link.get('href'):
                        item['url'] = link['href']

                    all_data.append(item)

                    if idx <= 3:
                        log(f"    元素 {idx}:")
                        log(f"      类名: {item['classes']}")
                        log(f"      文本: {item['text'][:50]}...")

                if all_data:
                    break

        if not all_data:
            log("⚠ 未能提取到结构化数据", "WARNING")
            log("  建议手动检查保存的HTML文件分析页面结构")
        else:
            log(f"✓ 成功提取 {len(all_data)} 条数据", "SUCCESS")

        # Save the extracted data
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        if all_data:
            json_file = os.path.join(DEBUG_DIR, f"extracted_data_{timestamp}.json")
            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(all_data, f, ensure_ascii=False, indent=2)
            log(f"✓ 数据已保存到: {json_file}")

            # Print a data summary
            log("\nData summary:")
            for item in all_data[:5]:
                log(f"  - {item.get('text', 'N/A')[:60]}")

        # Save a debug report
        report = {
            'timestamp': timestamp,
            'url': url,
            'load_time': load_time,
            'total_elements_found': len(all_data),
            'shop_related_classes': shop_classes[:20],
            'sample_data': all_data[:3]
        }

        report_file = os.path.join(DEBUG_DIR, f"debug_report_{timestamp}.json")
        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)
        log(f"✓ 调试报告已保存: {report_file}")

    except Exception as e:
        log(f"✗ 爬取过程出错: {e}", "ERROR")
        import traceback
        log(traceback.format_exc(), "ERROR")

    finally:
        log("关闭浏览器...")
        driver.quit()
        log("✓ 浏览器已关闭")

    log("="*60)
    log("调试完成！")
    log(f"所有调试文件保存在: {DEBUG_DIR}/")
    log("="*60)

    return all_data

def main():
    url = "https://s.taobao.com/search?_input_charset=utf-8&commend=all&finalPage=13&ie=utf8&ie=utf8&initiative_id=tbindexz_20170306&preLoadOrigin=https%3A%2F%2Fwww.taobao.com&q=%E7%94%B5%E5%99%A8%E5%AE%B6%E7%94%A8%E5%A4%A7%E5%85%A8&search_type=shop&source=suggest&sourceId=tb.index&spm=a21bo.jianhua%2Fa.search_downSideRecommend.d1&ssid=s5-e&suggest=0_1&suggest_query=%E7%94%B5%E5%99%A8&tab=shop&wq=%E7%94%B5%E5%99%A8"

    scrape_with_debug(url)

if __name__ == "__main__":
    main()
