Given these constraints, I managed to write a very simple crawler with Scrapy.

Here is my code (it runs fine against a live example):
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapySpider.items import SPage
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class TestSpider4(CrawlSpider):
    name = "spiderSO"
    allowed_domains = ["cumulodata.com"]
    start_urls = ["http://www.cumulodata.com"]
    extractor = SgmlLinkExtractor()

    def parse_start_url(self, response):
        # 3
        print('----------manual call of', response)
        self.parse_links(response)
        print('----------manual call done')
        # 1 return Request(self.start_urls[0]) # does not call parse_links(example.com)
        # 2 return Request(self.start_urls[0], callback=self.parse_links) # does not call parse_links(example.com)

    rules = (
        Rule(extractor, callback='parse_links', follow=True),
    )

    def parse_links(self, response):
        hxs = HtmlXPathSelector(response)
        print('----------- manual parsing links of', response.url)
        links = hxs.select('//a')
        for link in links:
            title = link.select('@title')
            url = link.select('@href').extract()[0]
            meta = {'title': title}
            yield Request(url, callback=self.parse_page, meta=meta)

    def parse_page(self, response):
        print('----------- parsing page: ', response.url)
        hxs = HtmlXPathSelector(response)
        item = SPage()
        item['url'] = str(response.request.url)
        item['title'] = response.meta['title']
        item['h1'] = hxs.select('//h1/text()').extract()
        yield item
I tried to solve this in 3 ways:

1: Returning a Request for the start URL - the rules are not executed
2: Same as above, but with a callback to parse_links - same problem
3: Calling parse_links after the start URL has been scraped, by implementing parse_start_url - the function is never called
Here are the logs:
----------manual call of <200 http://www.cumulodata.com>)
----------manual call done

# No '----------- manual parsing links' line, so `parse_links` is never called!
Here is a scraper that works well:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapySpider.items import SPage
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class TestSpider4(CrawlSpider):
    name = "spiderSO"
    allowed_domains = ["cumulodata.com"]
    start_urls = ["http://www.cumulodata.com/"]
    extractor = SgmlLinkExtractor()

    rules = (
        Rule(extractor, callback='parse_links', follow=True),
    )

    def parse_start_url(self, response):
        list(self.parse_links(response))

    def parse_links(self, response):
        hxs = HtmlXPathSelector(response)
        links = hxs.select('//a')
        for link in links:
            title = ''.join(link.select('./@title').extract())
            url = ''.join(link.select('./@href').extract())
            meta = {'title': title}
            cleaned_url = "%s/?1" % url if not '/' in url.partition('//')[2] else "%s?1" % url
            yield Request(cleaned_url, callback=self.parse_page, meta=meta)

    def parse_page(self, response):
        hxs = HtmlXPathSelector(response)
        item = SPage()
        item['url'] = response.url
        item['title'] = response.meta['title']
        item['h1'] = hxs.select('//h1/text()').extract()
        return item
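Assuming the standard Scrapy project layout implied by the imports above (a scrapy.cfg at the project root and SPage defined in scrapySpider/items.py), the spider can then be run by its name:

scrapy crawl spiderSO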
Changes:
Implemented parse_start_url - unfortunately, when you specify a callback for the first request, the rules are not executed. This behaviour is built into Scrapy, and we can only manage it with a workaround. So inside this function we call list(self.parse_links(response)). Why list()? Because parse_links is a generator, and generators are lazy, so it has to be consumed explicitly and completely (a short sketch of this follows below).
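To make the list() point concrete, here is a minimal sketch in plain Python, independent of Scrapy (the generator and URLs below are made up purely for illustration): calling a generator function does not run its body; something has to iterate over it.

def make_requests():
    # Stand-in for parse_links: nothing in this body runs until the
    # generator is actually iterated over.
    for url in ["http://example.com/a", "http://example.com/b"]:
        print("yielding", url)
        yield url

gen = make_requests()   # no output yet - the generator is lazy
urls = list(gen)        # list() consumes it, so the body finally executes
print(urls)             # ['http://example.com/a', 'http://example.com/b']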
cleaned_url = "%s/?1" % url if not '/' in url.partition('//')[2] else "%s?1" % url - a couple of things are happening here:
a. We append "?1" to the end of the URL - since parse_links yields URLs that Scrapy has already seen, the duplicate filter would otherwise drop those requests. A simpler way of avoiding that is to pass dont_filter=True to Request(); however, all of your pages link back to one another (back to the index from pageAA, and so on), so dont_filter here results in far too many duplicate requests and items (a sketch of that alternative follows point b).
b. if not '/' in url.partition('//')[2] - again, this is because of the links on your site. One internal link points to "www.cumulodata.com" and another to "www.cumulodata.com/". Since we explicitly added a mechanism that lets duplicates through, that mismatch would produce one extra item. Since we want this to be exact, I implemented this hack (illustrated in the second sketch below).
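For comparison, the dont_filter alternative mentioned in point a would look roughly like this - a sketch only, not the recommended change, since on this heavily interlinked site it multiplies requests and items:

    def parse_links(self, response):
        hxs = HtmlXPathSelector(response)
        for link in hxs.select('//a'):
            title = ''.join(link.select('./@title').extract())
            url = ''.join(link.select('./@href').extract())
            # dont_filter=True tells Scrapy's scheduler not to drop this request
            # as an already-seen duplicate, so the "?1" trick becomes unnecessary -
            # but every repeated link then turns into its own request and item.
            yield Request(url, callback=self.parse_page,
                          meta={'title': title}, dont_filter=True)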
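And a quick illustration of the partition check from point b, run in a plain Python shell (pageAA is the example page mentioned above):

url_a = "http://www.cumulodata.com"          # bare domain, no path
url_b = "http://www.cumulodata.com/pageAA"   # URL with a path

print(url_a.partition('//')[2])  # 'www.cumulodata.com'        -> no '/', so we append "/?1"
print(url_b.partition('//')[2])  # 'www.cumulodata.com/pageAA' -> has '/', so we append "?1"

print("%s/?1" % url_a)  # http://www.cumulodata.com/?1
print("%s?1" % url_b)   # http://www.cumulodata.com/pageAA?1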
title = ''.join(link.select('./@title').extract()) - ''.join(list) is used here instead of list[0]: when a link has no title attribute, extract() returns an empty list, so list[0] would raise an exception while ''.join() simply produces an empty string (see the sketch below).
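A tiny sketch of why the join form is safer when a link has no title attribute:

extracted = []              # what .extract() returns when the selector matches nothing

title = ''.join(extracted)  # '' - the item is still built, just with an empty title
# title = extracted[0]      # would raise IndexError: list index out of range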