一.将图片下载到同一个文件夹中
1.novel.py
# 需要下载的图片地址，需要是一个列表
# 如果不下载，只是将地址保存在数据库中，不需要设置列表
novel['img_url'] = [img_url]
2.在pipelines.py中自定义自己的pipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.http import Requestclass CustomImagesPipeline(ImagesPipeline):def get_media_requests(self, item, info):# 从item中获取要下载图片的url,根据url构造Request()对象,并返回该对象image_url = item['img_url'][0]yield Request(image_url, meta={'item': item})def file_path(self, request, response=None, info=None):# 用来自定义图片的下载路径item = request.meta['item']url = item['img_url'][0].split('/')[-2]return '%s.jpg' % urldef item_completed(self, results, item, info):# 图片下载完成后,返回的结果resultsprint(results)return item
3.在settings中开启自己的pipeline
ITEM_PIPELINES = {
    # Disable Scrapy's built-in ImagesPipeline: when a custom image
    # pipeline is used, the stock one must be set to None.
    'scrapy.pipelines.images.ImagesPipeline': None,
    'NovelSpider.pipelines.CustomImagesPipeline': 1,
}

# Directory where downloaded images are saved.
IMAGES_STORE = 'imgs'

# Item field that ImagesPipeline reads the image URL(s) from
# when downloading.
IMAGES_URLS_FIELD = 'img_url'
二.将图片分类下载(以奇书网为例)
1.在pipelines.py中自定义自己的pipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.http import Request


class QishuImagePipeline(ImagesPipeline):
    """Store each downloaded image under a sub-directory named after
    the item's category (qishuta.com example).
    """

    def get_media_requests(self, item, info):
        # One download Request per item; the item rides along in meta
        # so file_path() can read its category and URL.
        yield Request(item['img_url'], meta={'item': item})

    def file_path(self, request, response=None, info=None):
        # Layout: <category_name>/<basename of the image URL>.
        carried = request.meta['item']
        folder = carried['category_name']
        filename = carried['img_url'].split('/')[-1]
        return folder + '/' + filename

    def item_completed(self, results, item, info):
        # `results` lists (success, detail) pairs for the downloads.
        print(results)
        return item
2.在settings中开启自己的pipeline
ITEM_PIPELINES = {
    'Qishu.pipelines.QishuImagePipeline': 1,
    # The built-in ImagesPipeline is disabled in favour of the
    # custom pipeline above.
    'scrapy.pipelines.images.ImagesPipeline': None,
}

# Directory where downloaded images are saved.
IMAGES_STORE = 'images'

# Item field that ImagesPipeline reads the image URL from.
IMAGES_URLS_FIELD = 'img_url'