1
我是 Scrapy 和 Python 的新手,很难理解抓取流程。我不确定"爬取下一页"的逻辑应该放在哪里:是放在 parse 回调里(在 yield 请求到 parse_data 之后),还是放在 parse_data 函数内部,让它在抓取条目的同时自己处理翻页。
脚本逻辑:遍历各个分类,并抓取每个分类下的所有分页。
选项1:
import scrapy


class Amazon01Spider(scrapy.Spider):
    """Crawl Amazon book category pages and yield item names.

    Option 1: pagination is handled inside ``parse_data`` — each listing
    page scrapes its items, then follows its own "next page" link.
    """

    name = 'amazon0.1'
    allowed_domains = ['amazon.com']
    start_urls = ['https://amazon.com/Books/s?ie=UTF8&page=1&rh=n%3A283155&srs=9187220011']

    def parse(self, response):
        """Extract category links from the left nav and schedule each one."""
        cats = response.xpath('//*[@id="leftNavContainer"]//*[@class="a-unordered-list a-nostyle a-vertical s-ref-indent-two"]//li//@href').extract()
        for cat in cats:
            # urljoin resolves relative hrefs against the response URL —
            # safer than hand-concatenating "https://amazon.com/" + cat.
            yield scrapy.Request(response.urljoin(cat), callback=self.parse_data)

    def parse_data(self, response):
        """Yield one {'Name': ...} dict per listing item, then follow pagination."""
        items = response.xpath('//*[@class="a-fixed-left-grid-col a-col-right"]')
        for item in items:
            name = item.xpath('.//*[@class="a-row a-spacing-small"]/div/a/h2/text()').extract_first()
            yield {'Name': name}
        # extract_first() returns None on the last page; without this guard
        # the original string concatenation raised TypeError.
        next_page_url = response.xpath('//*[@class="pagnLink"]/a/@href').extract_first()
        if next_page_url:
            yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse_data)
选项2:
import scrapy


class Amazon01Spider(scrapy.Spider):
    """Crawl Amazon book category pages and yield item names.

    Option 2: pagination is handled inside ``parse`` — the category listing
    follows its own "next page" link (no callback, so the default ``parse``
    handles it), while ``parse_data`` only scrapes items.
    """

    name = 'amazon0.1'
    allowed_domains = ['amazon.com']
    start_urls = ['https://amazon.com/Books/s?ie=UTF8&page=1&rh=n%3A283155&srs=9187220011']

    def parse(self, response):
        """Schedule every category link, then follow this listing's pagination."""
        cats = response.xpath('//*[@id="leftNavContainer"]//*[@class="a-unordered-list a-nostyle a-vertical s-ref-indent-two"]//li//@href').extract()
        for cat in cats:
            # urljoin resolves relative hrefs against the response URL —
            # safer than hand-concatenating "https://amazon.com/" + cat.
            yield scrapy.Request(response.urljoin(cat), callback=self.parse_data)
        # extract_first() returns None on the last page; without this guard
        # the original string concatenation raised TypeError. Omitting the
        # callback routes the next page back through parse() by default.
        next_page_url = response.xpath('//*[@class="pagnLink"]/a/@href').extract_first()
        if next_page_url:
            yield scrapy.Request(response.urljoin(next_page_url))

    def parse_data(self, response):
        """Yield one {'Name': ...} dict per listing item on this page."""
        items = response.xpath('//*[@class="a-fixed-left-grid-col a-col-right"]')
        for item in items:
            name = item.xpath('.//*[@class="a-row a-spacing-small"]/div/a/h2/text()').extract_first()
            yield {'Name': name}