# -*- coding: utf-8 -*-
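"""Scrapy spider that searches 1688.com for each keyword listed in
resources/keywords.csv, saves the raw HTML of every results page under
data/html_raw, yields structured product items, and mirrors them to
data/json_structured as JSON files.

Run it from the Scrapy project root, for example (the output file name
below is just an example):

    scrapy crawl alibaba_crawler -o products.json
"""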
import scrapy
from scrapy.http import Request
import csv
import os
from selectorlib import Extractor
import re
import json
from pathlib import Path
from urllib.parse import quote

class AlibabaCrawlerSpider(scrapy.Spider):
    name = 'alibaba_crawler'
    allowed_domains = ['1688.com']
    start_urls = ['https://www.1688.com/']
    # Selectorlib extractor loaded from YAML; parse() below uses CSS selectors
    # directly, so this extractor is currently unused
    extractor = Extractor.from_yaml_file(os.path.join(os.path.dirname(__file__), "../resources/search_results.yml"))
    max_pages = 5  # maximum number of result pages to fetch per keyword

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Create directories for storing raw HTML and structured data
        self.html_dir = Path("data/html_raw")
        self.json_dir = Path("data/json_structured")
        self.html_dir.mkdir(parents=True, exist_ok=True)
        self.json_dir.mkdir(parents=True, exist_ok=True)
        self.page_counter = 0

    def start_requests(self):
        """Read keywords from keywords file and construct the 1688 search URL"""

        with open(os.path.join(os.path.dirname(__file__), "../resources/keywords.csv")) as search_keywords:
            for keyword in csv.DictReader(search_keywords):
                search_text = keyword["keyword"]
                # Build the 1688.com search URL; URL-encode the keyword so spaces
                # and non-ASCII characters are transmitted correctly
                url = "https://s.1688.com/selloffer/offer_search.htm?keywords={0}".format(quote(search_text))
                # meta carries the search text and page number into the parser
                yield scrapy.Request(url, callback=self.parse, meta={"search_text": search_text, "page_num": 1})


    def parse(self, response):
        """Save the raw HTML, extract product data, write it to JSON, and follow pagination."""
        self.page_counter += 1
        page_num = response.meta.get('page_num', 1)
        search_text = response.meta.get('search_text', 'unknown')

        # Clean search text for filename
        clean_search = re.sub(r'[^\w\s-]', '', search_text).strip().replace(' ', '_')
        html_filename = self.html_dir / f"{clean_search}_page{page_num}_{self.page_counter}.html"

        # Save complete HTML content
        with open(html_filename, 'w', encoding='utf-8') as f:
            f.write(response.text)

        self.logger.info(f"Saved HTML to {html_filename}")

        # Extract product cards using CSS selectors for the 1688.com results layout
        products = []
        product_cards = response.css('.sw-offer-item, .sw-dpl-offer, .offer-item')
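        # NOTE: the class names used here and in the field selectors below are a
        # best guess at 1688.com's results markup; if the page layout changes,
        # verify them against the saved pages in data/html_raw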

        for card in product_cards:
            link = card.css('a::attr(href)').get()
            product = {
                'name': card.css('.offer-item-title::text, .title-text::text').get(),
                'price': card.css('.price-text::text, .price::text').get(),
                'seller_name': card.css('.company-name::text, .sw-dpl-offer-company::text').get(),
                # response.urljoin() raises on None, so only resolve the link when one was found
                'link': response.urljoin(link) if link else None,
                'image': card.css('img::attr(src)').get(),
                'search_keyword': search_text,
                'page_number': page_num,
                'html_file': str(html_filename)
            }
            products.append(product)
            yield product

        # Save structured data as JSON
        json_filename = self.json_dir / f"{clean_search}_page{page_num}_{self.page_counter}.json"
        with open(json_filename, 'w', encoding='utf-8') as f:
            json.dump(products, f, ensure_ascii=False, indent=2)

        self.logger.info(f"Saved {len(products)} products to {json_filename}")

        # Pagination for 1688.com: the beginPage parameter selects the results page.
        # Stop once a page yields no products or max_pages has been reached.
        if products and page_num < self.max_pages:
            next_page = page_num + 1
            next_url = f"https://s.1688.com/selloffer/offer_search.htm?keywords={quote(search_text)}&beginPage={next_page}"
            yield Request(next_url, callback=self.parse, meta={"search_text": search_text, "page_num": next_page})