#!/usr/bin/env python3
"""
Smart batch scraper
Launches a browser for each keyword, captures the product-search API in real time, then requests it directly
"""

import os
import json
import time
import random
import logging
import requests
import re
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from urllib.parse import quote

# Configuration
OUTPUT_DIR = "smart_batch_results"
LOG_DIR = "smart_batch_logs"
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(LOG_DIR, exist_ok=True)

# Logging setup
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
log_file = os.path.join(LOG_DIR, f"smart_batch_{timestamp}.log")

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler(log_file, encoding='utf-8'),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

class SmartBatchScraper:
    def __init__(self):
        self.results = []
        self.failed_keywords = []
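        # Termux-on-Android install locations; adjust these paths for other environments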
        self.chromedriver_path = '/data/data/com.termux/files/usr/bin/chromedriver'
        self.chromium_path = '/data/data/com.termux/files/usr/bin/chromium-browser'

    def setup_driver(self):
        """设置Selenium驱动"""
        chrome_options = Options()
        chrome_options.binary_location = self.chromium_path
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--disable-software-rasterizer')
        chrome_options.add_argument('--single-process')
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        # Enable the Chrome performance log
        caps = DesiredCapabilities.CHROME.copy()
        caps['goog:loggingPrefs'] = {'performance': 'ALL'}
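        # With this pref set, chromedriver records CDP network/page events that
        # driver.get_log('performance') later returns as JSON-encoded entries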

        for key, value in caps.items():
            chrome_options.set_capability(key, value)

        service = Service(executable_path=self.chromedriver_path)
        driver = webdriver.Chrome(service=service, options=chrome_options)

        # Mask common webdriver fingerprints
        driver.execute_cdp_cmd('Network.enable', {})
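        # Injected before any page script runs, so navigator.webdriver and plugins look normal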
        driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
            'source': '''
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined
                });
                Object.defineProperty(navigator, 'plugins', {
                    get: () => [1, 2, 3, 4, 5]
                });
            '''
        })

        return driver

    def capture_search_api(self, keyword):
        """为特定关键词捕获搜索API"""
        logger.info(f"  启动浏览器捕获API: {keyword}")

        driver = None
        try:
            driver = self.setup_driver()

            # Visit the Taobao search page
            search_url = f"https://s.taobao.com/search?q={quote(keyword)}"
            logger.info(f"  Visiting: {search_url}")

            driver.get(search_url)

            # Wait for the page to load
            time.sleep(8)

            # Scroll the page to trigger additional API calls
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
            time.sleep(2)

            # Read the performance log
            logs = driver.get_log('performance')
            logger.info(f"  Captured {len(logs)} network log entries")

            # Filter for API requests
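            # Each entry wraps a JSON-encoded Chrome DevTools Protocol event;
            # Network.responseReceived events expose the response URL, status and MIME type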
            api_requests = []
            for entry in logs:
                try:
                    log_entry = json.loads(entry['message'])
                    message = log_entry['message']

                    if message['method'] == 'Network.responseReceived':
                        response = message['params']['response']
                        url = response['url']
                        status = response['status']
                        mime_type = response.get('mimeType', '')

                        # URL fragments that indicate search-related APIs
                        search_indicators = [
                            's.taobao.com/search',  # the search page itself
                            '/search',
                            '/itemsearch',
                            '/search_item',
                            'search.json',
                            'item.json',
                        ]

                        # Also accept h5api.m.taobao.com responses with a JSON MIME type
                        if (any(indicator in url.lower() for indicator in search_indicators) or
                            ('h5api.m.taobao.com' in url and 'json' in mime_type)):

                            if status == 200:
                                api_requests.append({
                                    'url': url,
                                    'mimeType': mime_type,
                                    'status': status
                                })

                except Exception:
                    # Skip malformed or irrelevant log entries
                    continue

            logger.info(f"  筛选出 {len(api_requests)} 个可能的搜索API")

            return api_requests

        except Exception as e:
            logger.error(f"  ✗ 捕获API失败: {e}")
            return []

        finally:
            if driver:
                driver.quit()

    def parse_jsonp(self, text):
        """解析JSONP"""
        try:
            return json.loads(text)
        except:
            match = re.search(r'[a-zA-Z0-9_]+\((.*)\)', text, re.DOTALL)
            if match:
                return json.loads(match.group(1))
            return None

    def request_api(self, url):
        """请求API"""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Referer': 'https://s.taobao.com/',
            'Accept': 'application/json, text/javascript, */*',
        }
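        # Note: many Taobao endpoints (especially h5api/mtop) also expect cookies and a signed
        # token, so a bare GET may return an error payload rather than product data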

        try:
            response = requests.get(url, headers=headers, timeout=10)

            if response.status_code == 200:
                data = self.parse_jsonp(response.text)
                return data

        except Exception as e:
            logger.warning(f"    请求失败: {e}")

        return None

    def has_products(self, data):
        """检查是否包含商品"""
        if not isinstance(data, dict):
            return False

        def search_products(obj, depth=0):
            if depth > 6:
                return False

            if isinstance(obj, dict):
                # Check known product-list keys
                for key in ['items', 'itemsArray', 'auctions', 'mainItems', 'resultList']:
                    if key in obj:
                        value = obj[key]
                        if isinstance(value, list) and len(value) > 0:
                            # Check whether the first entry looks like a product
                            first = value[0]
                            if isinstance(first, dict) and any(k in first for k in ['title', 'item_id', 'nid', 'raw_title']):
                                return True

                for value in obj.values():
                    if search_products(value, depth + 1):
                        return True

            elif isinstance(obj, list) and len(obj) > 0:
                if search_products(obj[0], depth + 1):
                    return True

            return False

        return search_products(data)

    def extract_products(self, data):
        """提取商品"""
        products = []

        def find_products(obj, depth=0):
            if depth > 6:
                return []

            if isinstance(obj, dict):
                for key in ['items', 'itemsArray', 'auctions', 'mainItems', 'resultList']:
                    if key in obj and isinstance(obj[key], list):
                        return obj[key]

                for value in obj.values():
                    result = find_products(value, depth + 1)
                    if result:
                        return result

            elif isinstance(obj, list) and obj:
                # Descend into lists as well, mirroring the traversal in has_products
                return find_products(obj[0], depth + 1)

            return []

        product_list = find_products(data)

        if product_list:
            for item in product_list:
                try:
                    product = {
                        'title': item.get('title') or item.get('raw_title', ''),
                        'price': item.get('price') or item.get('view_price', ''),
                        'sales': item.get('view_sales', ''),
                        'item_id': item.get('item_id') or item.get('nid', ''),
                        'shop': item.get('nick', ''),
                        'location': item.get('item_loc', ''),
                    }

                    if product['title']:
                        products.append(product)

                except Exception:
                    # Skip entries that do not look like product dicts
                    continue

        return products

    def scrape_keyword(self, keyword):
        """爬取单个关键词"""
        logger.info(f"\n处理关键词: {keyword}")

        try:
            # 1. Capture candidate APIs
            apis = self.capture_search_api(keyword)

            if not apis:
                logger.error(f"  ✗ 未捕获到API")
                self.failed_keywords.append({
                    'keyword': keyword,
                    'error': 'No API captured',
                    'timestamp': datetime.now().isoformat()
                })
                return None

            # 2. Try each candidate API
            for idx, api in enumerate(apis[:10], 1):  # try at most 10
                logger.info(f"  [{idx}] Testing API: {api['url'][:80]}...")

                data = self.request_api(api['url'])

                if data and self.has_products(data):
                    logger.info(f"    ✓ 找到商品数据!")

                    products = self.extract_products(data)

                    if products:
                        result = {
                            'keyword': keyword,
                            'success': True,
                            'product_count': len(products),
                            'products': products[:20],  # keep only the first 20
                            'api_url': api['url'],
                            'timestamp': datetime.now().isoformat()
                        }

                        logger.info(f"✓ 成功: {keyword} - 提取 {len(products)} 个商品")
                        return result

            logger.warning(f"⚠ {keyword} - 所有API均无商品数据")
            self.failed_keywords.append({
                'keyword': keyword,
                'error': 'No product data in APIs',
                'timestamp': datetime.now().isoformat()
            })

        except Exception as e:
            logger.error(f"✗ 异常: {keyword} - {e}")
            self.failed_keywords.append({
                'keyword': keyword,
                'error': str(e),
                'timestamp': datetime.now().isoformat()
            })

        return None

    def batch_scrape(self, keywords_file='daily_goods_100.txt', max_keywords=10):
        """批量爬取(测试模式限制数量)"""
        logger.info("="*70)
        logger.info("智能批量爬取器")
        logger.info("="*70)

        with open(keywords_file, 'r', encoding='utf-8') as f:
            keywords = [line.strip() for line in f if line.strip()]

        # Limit the keyword count (test mode)
        keywords = keywords[:max_keywords]

        logger.info(f"\n加载了 {len(keywords)} 个关键词(测试模式)")
        logger.info(f"日志: {log_file}\n")

        start_time = time.time()

        for idx, keyword in enumerate(keywords, 1):
            logger.info(f"\n[{idx}/{len(keywords)}] ========== {keyword} ==========")

            result = self.scrape_keyword(keyword)

            if result:
                self.results.append(result)

            # Random delay between keywords to reduce the chance of rate limiting
            delay = random.uniform(3, 6)
            logger.info(f"  Sleeping for {delay:.1f}s...")
            time.sleep(delay)

        # Save results
        self.save_results()

        # Summary statistics
        elapsed = time.time() - start_time
        logger.info("\n" + "="*70)
        logger.info("Scraping complete!")
        logger.info("="*70)
        logger.info(f"Total keywords: {len(keywords)}")
        logger.info(f"Successful: {len(self.results)}")
        logger.info(f"Failed: {len(self.failed_keywords)}")
        logger.info(f"Total time: {elapsed/60:.1f} minutes")
        logger.info("="*70)

    def save_results(self):
        """保存结果"""
        try:
            # Full run results
            result_file = os.path.join(OUTPUT_DIR, f"smart_batch_{timestamp}.json")
            with open(result_file, 'w', encoding='utf-8') as f:
                json.dump({
                    'summary': {
                        'successful': len(self.results),
                        'failed': len(self.failed_keywords),
                        'total_products': sum(r['product_count'] for r in self.results),
                    },
                    'results': self.results,
                    'failed': self.failed_keywords
                }, f, ensure_ascii=False, indent=2)

            logger.info(f"\n✓ 结果已保存: {result_file}")

            # Flat list of all products
            all_products = []
            for result in self.results:
                for product in result['products']:
                    product['keyword'] = result['keyword']
                    all_products.append(product)

            if all_products:
                products_file = os.path.join(OUTPUT_DIR, f"products_{timestamp}.json")
                with open(products_file, 'w', encoding='utf-8') as f:
                    json.dump(all_products, f, ensure_ascii=False, indent=2)

                logger.info(f"✓ 商品列表: {products_file} ({len(all_products)} 个)")

        except Exception as e:
            logger.error(f"✗ 保存失败: {e}")

def main():
    scraper = SmartBatchScraper()

    # Test mode: scrape only 10 keywords
    scraper.batch_scrape(max_keywords=10)

if __name__ == "__main__":
    main()
