Scrapy Crawlers: Basic Operations

Date: 2024-09-30 12:03:50

Reference blog: https://www.cnblogs.com/wupeiqi/p/6229292.html

Scrapy is an application framework written for crawling websites and extracting structured data. It can be used in a wide range of programs, such as data mining, information processing, and archiving historical data.

Scrapy uses the Twisted asynchronous networking library to handle network communication.


Installing Scrapy

Linux:
    pip3 install scrapy

Windows:
    a. pip3 install wheel
    b. Download Twisted from http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
    c. In the download directory, run: pip3 install Twisted-17.1.0-cp35-cp35m-win_amd64.whl
    d. pip3 install scrapy
    e. Download and install pywin32: https://sourceforge.net/projects/pywin32/files/
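To confirm the installation worked, a quick check from Python is enough (a minimal sketch; the version string will differ by environment):

# install check: if the import succeeds, Scrapy is available
import scrapy

print(scrapy.__version__)   # any version string means the install is usable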

Basic Scrapy workflow

1. Create a project. Command: scrapy startproject <project_name>

2. Create a spider (task). Command: scrapy genspider <spider_name> <domain>

3. Run a spider. Command: scrapy crawl <spider_name>

PS: scrapy list    # lists the spider names defined in the project
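Besides the scrapy crawl command, a project's spiders can also be started from a plain Python script. This is only a sketch, assuming it is run from the root of a project created with scrapy startproject; the spider name 'cnblogs' matches the task defined further below:

# run.py -- start a spider without the `scrapy crawl` command (sketch)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # load the project's settings.py
process.crawl('cnblogs')                          # same name as used with: scrapy crawl cnblogs
process.start()                                   # blocks until the crawl finishes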

Scrapy project structure


scrapy_test\

    |---commads\

        |--crawlall.py

        #home-made scrapy command; registration and usage are shown after the listing below


#!usr/bin/env python
# -*- coding: utf-8 -*-
# Author calmyan
# scrapy_test
# 2018/6/7 11:14
#__author__='Administrator'
from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings


class Command(ScrapyCommand):
    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()     # names of every spider in the project
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)  # queue each spider for execution
        self.crawler_process.start()                           # start them concurrently

crawlall.py
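For Scrapy to discover the command above, the package that contains crawlall.py has to be registered in settings.py. This project does so with the COMMANDS_MODULE line that appears later in the settings listing; repeated here as a minimal sketch, together with the resulting usage:

# settings.py -- register the directory that holds the custom command
COMMANDS_MODULE = 'scrapy_test.commads'   # '<project name>.<directory name>'

# afterwards, every spider in the project can be run at once with:
#   scrapy crawlall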

    |---spiders\

        |----cnblogs.py

        #Task 1: cnblogs -- goal: crawl the article titles and links from the cnblogs.com front page

# -*- coding: utf-8 -*-
import scrapy
import io
import sys
from scrapy.http import Request
# from scrapy.dupefilter import RFPDupeFilter
from scrapy.selector import Selector, HtmlXPathSelector   # selector objects, e.g. Selector.xpath
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')   # console encoding workaround
from ..items import ScrapyTestItem


class CnblogsSpider(scrapy.Spider):
    name = 'cnblogs'
    allowed_domains = ['cnblogs.com']
    start_urls = ['http://cnblogs.com/']

    def parse(self, response):
        # print(response.body)
        # print(response.text)
        # wrap the response in a Selector and grab every post node
        hxs = Selector(response=response).xpath('//div[@id="post_list"]/div[@class="post_item"]')
        # //   search from the document root (descends any number of levels)
        # ./   search relative to the current node
        # text() gets the node text, extract() returns all matches as strings, extract_first() returns the first
        # item_list = hxs.xpath('./div[@class="post_item_body"]//a[@class="titlelnk"]/text()').extract_first()
        item_list = hxs.xpath('./div[@class="post_item_body"]//a[@class="titlelnk"]/text()').extract()   # titles
        # href_list = hxs.xpath('./div[@class="post_item_body"]//a[@class="titlelnk"]/@href').extract_first()
        href_list = hxs.xpath('./div[@class="post_item_body"]//a[@class="titlelnk"]/@href').extract()    # links
        # for item in item_list:
        #     print('item----', item)
        yield ScrapyTestItem(title=item_list, href=href_list)

        # if there is pagination, keep scheduling the next pages
        pags_list = Selector(response=response).xpath('//div[@id="paging_block"]//a/@href').extract()
        print(pags_list)
        for pag in pags_list:
            url = 'http://cnblogs.com/' + pag
            yield Request(url=url)
            # yield Request(url=url, callback=self.fun)   # a custom callback can be used instead

    def fun(self, response):
        print('===========')

cnblogs.py
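The comments in parse() rely on the difference between extract() and extract_first(); it is easiest to see on a tiny selector. A self-contained sketch (the HTML string is invented for illustration):

from scrapy.selector import Selector

html = ('<div id="post_list">'
        '<div class="post_item"><a class="titlelnk" href="/p/1">first</a></div>'
        '<div class="post_item"><a class="titlelnk" href="/p/2">second</a></div>'
        '</div>')

titles = Selector(text=html).xpath('//a[@class="titlelnk"]/text()')
print(titles.extract())        # ['first', 'second']  -- all matches, as plain strings
print(titles.extract_first())  # 'first'              -- only the first match (None if nothing matches)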

       |----chouti.py

        #Task 2: chouti -- goal: crawl the articles on the dig.chouti.com front page and upvote them

# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import HtmlXPathSelector, Selector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
import getpass
from scrapy import FormRequest


class ChouTiSpider(scrapy.Spider):
    # spider name; used to start the crawl from the command line
    name = "chouti"
    # allowed domains
    allowed_domains = ["chouti.com"]

    cookie_dict = {}
    has_request_set = {}

    # custom start requests
    def start_requests(self):
        url = 'https://dig.chouti.com/'
        # self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
        # return [Request(url=url, callback=self.login, headers=self.headers)]
        yield Request(url=url, callback=self.login)

    def login(self, response):
        cookie_jar = CookieJar()                                 # container for the cookies
        cookie_jar.extract_cookies(response, response.request)   # parse the cookies out of the response
        # for k, v in cookie_jar._cookies.items():
        #     for i, j in v.items():
        #         for m, n in j.items():
        #             self.cookie_dict[m] = n.value
        #             print('-----', m, '::::::::', n.value, '--------')
        self.cookie = cookie_jar._cookies                        # keep the raw cookies

        phone = input('Enter phone number: ').strip()
        password = getpass.getpass('Enter password: ').strip()
        body = 'phone=86%s&password=%s&oneMonth=1' % (phone, password)
        req = Request(
            url='https://dig.chouti.com/login',
            method='POST',
            headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
            # headers=self.headers,
            # body=body,
            body='phone=86%s&password=%s&oneMonth=1' % (phone, password),
            # cookies=self.cookie_dict,
            cookies=self.cookie,
            callback=self.check_login   # callback after the login POST
        )
        yield req

    def check_login(self, response):
        print('Login status ------------', response.text)
        req = Request(
            url='https://dig.chouti.com/',
            method='GET',
            callback=self.show,
            cookies=self.cookie,
            # cookies=self.cookie_dict,
            dont_filter=True
        )
        yield req

    def show(self, response):
        # print(response)
        hxs = HtmlXPathSelector(response)
        news_list = hxs.select('//div[@id="content-list"]/div[@class="item"]')
        # hxs = Selector(response=response)
        # news_list = hxs.xpath('//div[@id="content-list"]/div[@class="item"]')
        for new in news_list:
            # temp = new.xpath('div/div[@class="part2"]/@share-linkid').extract()
            link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first()
            yield Request(
                url='https://dig.chouti.com/link/vote?linksId=%s' % link_id,
                method='POST',
                cookies=self.cookie,
                # cookies=self.cookie_dict,
                callback=self.do_favor
            )

        # pagination
        page_list = hxs.select(r'//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'https://dig.chouti.com%s' % page
            import hashlib
            hash = hashlib.md5()
            hash.update(bytes(page_url, encoding='utf-8'))
            key = hash.hexdigest()
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                yield Request(
                    url=page_url,
                    method='GET',
                    callback=self.show
                )

    def do_favor(self, response):
        print(response.text)

chouti.py
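The spider imports FormRequest but never uses it; the login POST body is built by hand instead. FormRequest can do the URL-encoding and the Content-Type header itself. A sketch, assuming the form fields are the same phone/password/oneMonth seen in the body string above (whether the site still accepts them is untested):

from scrapy import FormRequest

def build_login_request(phone, password, callback):
    # FormRequest URL-encodes formdata and sets the Content-Type header automatically
    return FormRequest(
        url='https://dig.chouti.com/login',
        formdata={'phone': '86' + phone, 'password': password, 'oneMonth': '1'},
        callback=callback,
    )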

    |---items.py

  #custom item fields are defined here; they are persisted through the pipelines

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class ScrapyTestItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # custom fields used by the spiders above
    title = scrapy.Field()
    href = scrapy.Field()

items.py
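A scrapy.Item behaves like a dict that only accepts its declared fields. A small sketch of how the cnblogs spider fills ScrapyTestItem (assuming the scrapy_test package is importable, e.g. from the project root):

from scrapy_test.items import ScrapyTestItem

item = ScrapyTestItem(title=['t1', 't2'], href=['/p/1', '/p/2'])
print(item['title'])       # ['t1', 't2']  -- dict-style access to a declared field
item['href'] = ['/p/3']    # declared fields can be reassigned
# item['author'] = 'x'     # raises KeyError: 'author' is not a declared Field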

    |---middlewares.py

  #middlewares: requests and responses can be processed here, e.g. setting a proxy; every request that needs to be downloaded passes through each downloader middleware's process_request, and middlewares can also handle exceptions

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class ScrapyTestSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        """
        Called once a download has finished, before the response is handed to parse().
        :param response:
        :param spider:
        :return:
        """
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        """
        Called when the spider has finished processing the response and returns its results.
        :param response:
        :param result:
        :param spider:
        :return: must return an iterable of Request or Item objects
        """
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        """
        Called on exceptions.
        :param response:
        :param exception:
        :param spider:
        :return: None to let the remaining middlewares handle the exception, or an iterable of
                 Response or Item objects that is handed to the scheduler or the pipelines
        """
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        """
        Called when the spider starts.
        :param start_requests:
        :param spider:
        :return: an iterable of Request objects
        """
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ScrapyTestDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        """
        Called for every request that needs to be downloaded, through every downloader middleware.
        :param request:
        :param spider:
        :return:
            None: continue with the remaining middlewares and download the request
            Response object: stop calling process_request and start calling process_response
            Request object: stop the middleware chain and hand the request back to the scheduler
            raise IgnoreRequest: stop calling process_request and start calling process_exception
        """
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        """
        Called with the response returned from the downloader.
        :param request:
        :param response:
        :param spider:
        :return:
            Response object: handed to the next middleware's process_response
            Request object: stop the middleware chain; the request is rescheduled for download
            raise IgnoreRequest: Request.errback is called
        """
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        """
        Called when a download handler or another middleware's process_request() raises an exception.
        :param request:
        :param exception:
        :param spider:
        :return:
            None: let the remaining middlewares handle the exception
            Response object: stop the process_exception chain
            Request object: stop the middleware chain; the request is rescheduled for download
        """
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# ================= custom proxy middleware =====================================
from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware
import random, base64, six


def to_bytes(text, encoding=None, errors='strict'):
    if isinstance(text, bytes):
        return text
    if not isinstance(text, six.string_types):
        raise TypeError('to_bytes must receive a unicode, str or bytes '
                        'object, got %s' % type(text).__name__)
    if encoding is None:
        encoding = 'utf-8'
    return text.encode(encoding, errors)


class ProxyMiddleware(object):
    def process_request(self, request, spider):
        PROXIES = [
            {'ip_port': '111.11.228.75:80', 'user_pass': ''},
            {'ip_port': '120.198.243.22:80', 'user_pass': ''},
            {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
            {'ip_port': '101.71.27.120:80', 'user_pass': ''},
            {'ip_port': '122.96.59.104:80', 'user_pass': ''},
            {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
        ]
        proxy = random.choice(PROXIES)
        if proxy['user_pass'] is not None:
            request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
            encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass'])).decode()
            request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
            print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
        else:
            print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
            request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

middlewares.py
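ProxyMiddleware only takes effect once it is listed in DOWNLOADER_MIDDLEWARES; the entry is present but commented out in the settings listing below. A sketch of the enabled form:

# settings.py -- enable the custom proxy middleware
DOWNLOADER_MIDDLEWARES = {
    'scrapy_test.middlewares.ProxyMiddleware': 543,
}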

    |---myextension.py

    #custom extension driven by signals: custom actions run when specific signals fire, e.g. when the spider starts

#!usr/bin/env python
# -*- coding: utf-8 -*-
# Author calmyan
# scrapy_test
# 2018/6/7 11:39
#__author__='Administrator'
from scrapy import signals
import time, datetime


class MyExtension(object):
    def __init__(self, value):
        self.value = value
        self.time_format = '%y-%m-%d:%H-%M-%S'
        print('open-------->>>>>>>>>>>>>>>>>>', value)   # runs when the class is instantiated

    @classmethod
    def from_crawler(cls, crawler):
        val = crawler.settings.get('MYEXTENSION_PATH')
        ext = cls(val)
        # crawler.signals.connect(ext.engine_started, signal=signals.engine_started)
        # crawler.signals.connect(ext.engine_stopped, signal=signals.engine_stopped)
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        # crawler.signals.connect(ext.spider_error, signal=signals.spider_error)
        crawler.signals.connect(ext.response_downloaded, signal=signals.response_downloaded)
        return ext

    def response_downloaded(self, spider):
        print('running while a response is downloaded ===============')

    def engine_started(self, spider):
        self.f = open(self.value, 'a+', encoding='utf-8')

    def engine_stopped(self, spider):
        self.f.write('Task end time: ')
        self.f.close()

    # called when the spider is opened
    def spider_opened(self, spider):
        print('open-------->>>>>>>>>>>>>>>>>>')
        self.f = open(self.value, 'a+', encoding='utf-8')
        # start_time = time.time()
        start_time = time.strftime(self.time_format)
        print(start_time, 'start time', type(start_time))
        self.f.write('Task start time: ' + str(start_time) + '\r')

    # called when the spider is closed
    def spider_closed(self, spider):
        print('close-<<<<<<<<<<<<<--------', time.ctime())
        end_time = datetime.date.fromtimestamp(time.time())
        end_time = time.strftime(self.time_format)
        self.f.write('Task end time: ' + str(end_time) + '\r')
        self.f.close()

    # called when an error occurs
    def spider_error(self, spider):
        print('----->>-----an error occurred------<<------')

myextension.py
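MyExtension reads its output path from the MYEXTENSION_PATH setting and has to be enabled under EXTENSIONS; both lines already appear in the settings listing below and are repeated here for context:

# settings.py -- wire up the signal-driven extension
MYEXTENSION_PATH = 'TWO2.txt'    # file that MyExtension appends its timestamps to
EXTENSIONS = {
    'scrapy_test.myextension.MyExtension': 100,
}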

     |---pipelines.py

    #data persistence

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import types
from scrapy.exceptions import DropItem   # raise to stop further pipeline processing of an item
import time, datetime


class ScrapyTestPipeline(object):
    def process_item(self, item, spider):
        print('Printing to the screen', item)
        print(spider, '=======')
        if spider.name == "cnblogs":
            raise DropItem()   # later pipelines never receive this item
        return item


class ScrapyTestPipeline2(object):
    def __init__(self, v):
        self.path = v
        print(self.path, '```````````')
        # self.f = open(v, 'a+')
        self.time_format = '%y-%m-%d:%H-%M-%S'

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called when the pipeline object is created.
        :param crawler:
        :return:
        """
        val = crawler.settings.get('MYFILE_PATH')   # settings values can be read here
        return cls(val)

    def open_spider(self, spider):
        """
        Called when the spider starts.
        :param spider:
        :return:
        """
        self.f = open(self.path, 'a+', encoding="utf-8")
        start_time = time.strftime(self.time_format)
        self.f.write('Start time: ' + str(start_time) + '\r')

    def close_spider(self, spider):
        """
        Called when the spider is closed.
        :param spider:
        :return:
        """
        end_time = time.strftime(self.time_format)
        self.f.write('End time: ' + str(end_time) + '\r')
        self.f.close()

    def process_item(self, item, spider):
        print('Saving to a file', type(item), item)
        print(type(item['title']))
        if isinstance(item['title'], list):    # the cnblogs spider yields lists of titles/links
            number = len(item['title'])
            for x in range(number):
                self.f.write('Title: ' + item['title'][x] + ' ')
                self.f.write('Link: ' + item['href'][x] + '\r')
        elif isinstance(item['title'], str):
            self.f.write('Title: ' + item['title'] + '\r')
            self.f.write('Link <' + item['href'] + '>\r')
        return item

pipelines.py
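Raising DropItem in a pipeline stops lower-priority pipelines from ever seeing that item, which is what the spider check in ScrapyTestPipeline is for. A minimal sketch with an invented rule (skip items without a title):

from scrapy.exceptions import DropItem

class SkipEmptyTitlePipeline(object):
    def process_item(self, item, spider):
        if not item.get('title'):              # hypothetical condition, for illustration only
            raise DropItem('missing title')    # pipelines registered after this one never get the item
        return item                            # returning the item hands it to the next pipeline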

     |---settings.py

      #configuration for the crawl

# -*- coding: utf-8 -*-

# Scrapy settings for scrapy_test project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

# -------------- custom file paths ------------
MYFILE_PATH = 'TEST_FILE.txt'
MYEXTENSION_PATH = 'TWO2.txt'

# -------------- register custom commands ------------
COMMANDS_MODULE = 'scrapy_test.commads'   # '<project name>.<directory name>'

# 1. project (bot) name
BOT_NAME = 'scrapy_test'

# 2. spider module paths
SPIDER_MODULES = ['scrapy_test.spiders']
NEWSPIDER_MODULE = 'scrapy_test.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# 3. client User-Agent request header
#USER_AGENT = 'scrapy_test (+http://www.yourdomain.com)'

# Obey robots.txt rules
# 4. whether to honour the site's robots.txt crawler policy
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# 5. number of concurrent requests
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 6. download delay in seconds
#DOWNLOAD_DELAY = 3

# The download delay setting will honor only one of:
# 7. concurrent requests per domain; the download delay is also applied per domain
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
# concurrent requests per IP; if set, CONCURRENT_REQUESTS_PER_DOMAIN is ignored
# and the download delay is applied per IP instead
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# 8. whether cookies are supported (handled through a cookiejar)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# 9. the Telnet console can be used to inspect and control the running crawler:
#    connect with `telnet ip port` and issue commands
#TELNETCONSOLE_ENABLED = False
#TELNETCONSOLE_HOST = '127.0.0.1'
#TELNETCONSOLE_PORT = [6023,]

# 10. default request headers
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# 11. item pipelines: registered pipelines process yielded Items; lower numbers run first
ITEM_PIPELINES = {
   'scrapy_test.pipelines.ScrapyTestPipeline': 200,
   'scrapy_test.pipelines.ScrapyTestPipeline2': 300,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# 12. custom extensions, driven by signals
EXTENSIONS = {
   # 'scrapy.extensions.telnet.TelnetConsole': None,
   'scrapy_test.myextension.MyExtension': 100,
}

# 13. maximum crawl depth; the current depth is available via meta; 0 means no limit
DEPTH_LIMIT = 1   # how many levels deep to follow links

# 14. crawl order: 0 means depth first, LIFO (default); 1 means breadth first, FIFO
# LIFO, depth first
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
# FIFO, breadth first
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'

# 15. scheduler queue
# SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# from scrapy.core.scheduler import Scheduler

# 16. URL dedupe filter (enabled by default in scrapy)
# DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'   # default class that filters already visited URLs
DUPEFILTER_CLASS = 'md.RepeatFilter'                     # custom class that filters already visited URLs
# DUPEFILTER_DEBUG = False

"""
17. AutoThrottle algorithm
    from scrapy.contrib.throttle import AutoThrottle
    automatic throttling:
    1. take the minimum delay DOWNLOAD_DELAY
    2. take the maximum delay AUTOTHROTTLE_MAX_DELAY
    3. set the initial download delay AUTOTHROTTLE_START_DELAY
    4. when a request completes, take its latency, i.e. the time between connecting
       and receiving the response headers
    5. use AUTOTHROTTLE_TARGET_CONCURRENCY to compute the next delay:
        target_delay = latency / self.target_concurrency
        new_delay = (slot.delay + target_delay) / 2.0   # slot.delay is the previous delay
        new_delay = max(target_delay, new_delay)
        new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        slot.delay = new_delay
"""
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# enable auto throttling
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

"""
18. HTTP caching
    caches requests/responses that have already been sent so they can be reused later
    from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
    from scrapy.extensions.httpcache import DummyPolicy
    from scrapy.extensions.httpcache import FilesystemCacheStorage
"""
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# enable the cache
#HTTPCACHE_ENABLED = True
# cache policy: cache every request; later identical requests are served from the cache
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# cache policy: honour HTTP response headers such as Cache-Control and Last-Modified
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# cache expiration time
#HTTPCACHE_EXPIRATION_SECS = 0
# cache directory
#HTTPCACHE_DIR = 'httpcache'
# HTTP status codes that are never cached
#HTTPCACHE_IGNORE_HTTP_CODES = []
# cache storage backend
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

"""
19. proxies (set through environment variables or a custom middleware)
    from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

    Option 1: use the default middleware with environment variables
    os.environ
    {
        http_proxy: http://root:woshiniba@192.168.11.11:9999/
        https_proxy: http://192.168.11.11:9999/
    }

    Option 2: use a custom downloader middleware

    def to_bytes(text, encoding=None, errors='strict'):
        if isinstance(text, bytes):
            return text
        if not isinstance(text, six.string_types):
            raise TypeError('to_bytes must receive a unicode, str or bytes '
                            'object, got %s' % type(text).__name__)
        if encoding is None:
            encoding = 'utf-8'
        return text.encode(encoding, errors)

    class ProxyMiddleware(object):
        def process_request(self, request, spider):
            PROXIES = [
                {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
            ]
            proxy = random.choice(PROXIES)
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass'])).decode()
                request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
            else:
                print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

    DOWNLOADER_MIDDLEWARES = {
        'step8_king.middlewares.ProxyMiddleware': 500,
    }
"""

"""
20. HTTPS
    there are two cases when crawling over HTTPS:
    1. the target site uses a trusted certificate (supported by default)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
    2. the target site uses a custom (self-signed) certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        # https.py
        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,    # pKey object
                    certificate=v2,   # X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )
    related classes:
        scrapy.core.downloader.handlers.http.HttpDownloadHandler
        scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
        scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
    related settings:
        DOWNLOADER_HTTPCLIENTFACTORY
        DOWNLOADER_CLIENTCONTEXTFACTORY
"""

# 21. spider middlewares
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'scrapy_test.middlewares.ScrapyTestSpiderMiddleware': 543,
#    'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
#    'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
#    'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
#    'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
#    'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
#}

# 22. downloader middlewares
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   # 'scrapy_test.middlewares.ScrapyTestDownloaderMiddleware': 543,
   # 'scrapy_test.middlewares.ProxyMiddleware': 543,
   # default downloader middlewares:
   # 'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
   # 'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
   # 'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
   # 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
   # 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
   # 'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
   # 'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
   # 'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
   # 'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
   # 'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
   # 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
   # 'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
   # 'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
   # 'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
}

settings.py

   |---md.py

     #home-made URL dedupe filter

#!usr/bin/env python
# -*- coding: utf-8 -*-
# Author calmyan
# scrapy_test
# 2018/6/6 15:19
#__author__='Administrator'


class RepeatFilter(object):   # dedupe filter class
    def __init__(self):
        self.visited_url = set()   # set of URLs already seen

    @classmethod
    def from_settings(cls, settings):
        # called when the filter is instantiated
        return cls()

    # has this request been seen before?
    def request_seen(self, request):
        if request.url in self.visited_url:
            return True
        self.visited_url.add(request.url)   # not visited yet: remember it and return False
        return False

    # called when the spider starts; open files or database connections here
    def open(self):   # can return deferred
        pass

    # called when the spider finishes
    def close(self, reason):   # can return a deferred
        print('closing')

    # logging
    def log(self, request, spider):   # log that a request has been filtered
        print('URL', request.url)

md.py
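The filter above only runs once it is activated through DUPEFILTER_CLASS, as already done in the settings listing. Requests created with dont_filter=True, as in the chouti spider's check_login, bypass request_seen() for that single request. A sketch of both sides:

# settings.py -- point Scrapy at the home-made dupe filter
DUPEFILTER_CLASS = 'md.RepeatFilter'

# in a spider: dont_filter=True skips the dupe filter for this one request
# yield Request(url='https://dig.chouti.com/', callback=self.show, dont_filter=True)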