Mir*_*lic 6 python lxml scrapy scrapy-spider
I have this code using the Scrapy framework:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from lxml import html


class Scrapy1Spider(scrapy.Spider):
    name = "scrapy1"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="button next"]',)), callback="parse", follow=True),)

    def parse(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'
But the problem is that I only get 100 results, and the spider never goes to the next page.
What is wrong here?
Fra*_*tin 12
Your rule is not being used, because you are not using a CrawlSpider.
So you have to create the next-page requests manually, like this:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from lxml import html


class Scrapy1Spider(scrapy.Spider):
    name = "craiglist"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    # note: these rules are ignored, since a plain scrapy.Spider does not process them
    rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="button next"]',)), callback="parse", follow=True),)

    def parse(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'

        # follow next page links
        next_page = response.xpath('.//a[@class="button next"]/@href').extract()
        if next_page:
            next_href = next_page[0]
            next_page_url = 'http://sfbay.craigslist.org' + next_href
            request = scrapy.Request(url=next_page_url)
            yield request
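As a side note, you don't have to concatenate the domain by hand: Scrapy can resolve the relative href for you. Here is a minimal sketch of the same pagination logic, assuming Scrapy 1.0+ (the class and spider names here are just placeholders for this sketch):

# -*- coding: utf-8 -*-
# Sketch only: same idea, but letting Scrapy build the absolute next-page URL.
import scrapy


class CraigslistNpoSpider(scrapy.Spider):  # hypothetical name for this sketch
    name = "craiglist_urljoin"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = ('http://sfbay.craigslist.org/search/npo',)

    def parse(self, response):
        titles = response.xpath('//div[@class="content"]/p[@class="row"]')
        self.logger.info('%d rows on %s', len(titles), response.url)

        next_href = response.xpath('//a[@class="button next"]/@href').extract_first()
        if next_href:
            # response.urljoin resolves the relative href against response.url
            yield scrapy.Request(response.urljoin(next_href), callback=self.parse)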
Or use a CrawlSpider, like this:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from lxml import html


class Scrapy1Spider(CrawlSpider):
    name = "craiglist"
    allowed_domains = ["sfbay.craigslist.org"]
    start_urls = (
        'http://sfbay.craigslist.org/search/npo',
    )

    # CrawlSpider implements parse() itself, so the callback must use a different name
    rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//a[@class="button next"]',)), callback="parse_page", follow=True),)

    def parse_page(self, response):
        site = html.fromstring(response.body_as_unicode())
        titles = site.xpath('//div[@class="content"]/p[@class="row"]')
        print len(titles), 'AAAA'
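If you want to try the CrawlSpider version without setting up a full Scrapy project, a small sketch using Scrapy's CrawlerProcess (assuming Scrapy 1.0+ and that the Scrapy1Spider class above is importable from your script) would look like this:

# Sketch: run the spider from a plain Python script instead of `scrapy crawl craiglist`.
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0 (compatible; scrapy-test)',  # example setting, adjust as needed
})
process.crawl(Scrapy1Spider)  # the CrawlSpider class defined above
process.start()               # blocks until the crawl finishes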