1. The urllib method
urllib is Python's built-in HTTP request library.
import urllib.request

# 1. Define the URL to fetch
url = 'http://www.baidu.com/'
# 2. Send a request to the target URL
response = urllib.request.urlopen(url)
# 3. Read the data
data = response.read()
# print(data)  # printing the raw bytes shows escaped characters
print(data.decode('utf-8'))  # decode converts the bytes into a string using the given encoding
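The response object returned by urlopen carries more than the body; the status code, headers, and final URL are available too. A minimal sketch against the same URL:

import urllib.request

response = urllib.request.urlopen('http://www.baidu.com/')
print(response.status)        # HTTP status code, e.g. 200
print(response.getheaders())  # list of (header-name, value) tuples
print(response.geturl())      # the final URL after any redirects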
# POST request
import urllib.request
import urllib.parse

url = 'http://www.iqianyue.com/mypost/'
# Build the data to upload
postdata = urllib.parse.urlencode({
    'name': 'Jack',
    'pass': '123456'
}).encode('utf-8')  # encode the urlencoded string into bytes
html = urllib.request.urlopen(url, data=postdata).read()
print(html)
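urlencode is just as useful for GET requests: the parameters can be encoded and appended to the URL as a query string. A minimal sketch using Baidu's public `wd` search parameter (the parameter name is taken from Baidu's search URL, not from the original article):

import urllib.request
import urllib.parse

params = urllib.parse.urlencode({'wd': 'python'})  # -> 'wd=python'
url = 'http://www.baidu.com/s?' + params           # append as a query string
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
data = urllib.request.urlopen(req).read()
print(len(data))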
# headers: for anti-crawler checks that inspect the request headers
import urllib.request

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
request1 = urllib.request.Request('https://www.dianping.com/', headers=headers)  # the Request class builds a complete request object
response1 = urllib.request.urlopen(request1).read()
print(response1.decode('utf-8'))
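Headers can also be attached after the Request object has been built, via its add_header method; a minimal sketch:

import urllib.request

req = urllib.request.Request('https://www.dianping.com/')
# add_header takes the header name and value as two separate arguments
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')
response = urllib.request.urlopen(req)
print(response.status)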
# Timeout setting + exception handling
import urllib.request
import urllib.error

for i in range(20):
    try:
        response1 = urllib.request.urlopen('http://www.ibeifeng.com/', timeout=0.01)
        print('a')  # printed only when the request succeeds within the timeout
    except urllib.error.URLError as e:
        print(e)
    except BaseException as a:  # base class of all exceptions
        print(a)
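To tell a timeout apart from other failures, the URLError's reason attribute can be inspected; on most Python 3 versions a timeout surfaces as a socket.timeout (aliased to TimeoutError in newer releases). A hedged sketch against the same test URL:

import socket
import urllib.error
import urllib.request

try:
    urllib.request.urlopen('http://www.ibeifeng.com/', timeout=0.01)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('request timed out')
    else:
        print('failed for another reason:', e.reason)
except socket.timeout:
    # a timeout raised while reading the body can surface directly
    print('read timed out')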
2. The requests method
– Requests is an HTTP library written in Python on top of urllib, released under the Apache2 License.
– urllib is fairly cumbersome to use; Requests is much more convenient and saves a great deal of work.
– requests is the simplest, easiest-to-use HTTP library implemented in Python, and it is the recommended library for crawlers.
– A default Python installation does not include the requests module; it has to be installed separately via pip (e.g. pip install requests).
import requests

# GET request
r = requests.get('https://www.taobao.com/')
# Print the raw bytes
# print(r.content)
# print(r.content.decode('utf-8'))  # decode the bytes yourself
print(r.text)  # print the decoded text

import chardet
# Detect the page encoding automatically; returns a dict
print(chardet.detect(r.content))
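requests can also detect the encoding itself: r.encoding is what the response headers declare, while r.apparent_encoding is guessed from the body (internally using a chardet-style detector). A minimal sketch:

import requests

r = requests.get('https://www.taobao.com/')
print(r.encoding)                 # encoding declared by the response headers
print(r.apparent_encoding)        # encoding guessed from the content
r.encoding = r.apparent_encoding  # make r.text decode with the detected encoding
print(r.text[:200])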
# POST request: simulate a form login
import requests

# Build the data to submit to the page
data = {
    'name': 'Jack',
    'pass': '123456'
}
# Send the request with the login data
r = requests.post('http://www.iqianyue.com/mypost/', data=data)
print(r.text)  # print the response text
# Save the post-login html locally
f = open('login.html', 'wb')
f.write(r.content)  # write the raw bytes
f.close()
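For a real login the cookies returned by the login response usually need to be reused on later requests; requests.Session keeps them automatically. A minimal sketch against the same test form (the follow-up URL is only a placeholder):

import requests

s = requests.Session()  # a session persists cookies across requests
login = s.post('http://www.iqianyue.com/mypost/', data={'name': 'Jack', 'pass': '123456'})
print(login.status_code)
# any cookies set by the server are sent automatically from now on
follow_up = s.get('http://www.iqianyue.com/')  # placeholder follow-up URL on the same site
print(follow_up.status_code)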
# headers: for anti-crawler checks that inspect the request headers
import requests

# Build the headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
r = requests.get('https://www.dianping.com/', headers=headers)
print(r.text)
print(r.status_code)  # 403 means the request was blocked (check the status)
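Rather than checking the status code by hand, raise_for_status() turns any 4xx/5xx response into an exception; a minimal sketch:

import requests
from requests.exceptions import HTTPError

headers = {'User-Agent': 'Mozilla/5.0'}
try:
    r = requests.get('https://www.dianping.com/', headers=headers)
    r.raise_for_status()  # raises HTTPError for any 4xx/5xx response
    print('OK:', r.status_code)
except HTTPError as e:
    print('blocked or failed:', e)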
# cookies
# Skip the login step and fetch the resource directly
import requests

f = open('cookie.txt', 'r')  # open the cookie file
# Initialise the cookies as an empty dict
cookies = {}
# Split the cookie string on ';' into a list, then iterate over it
# split(): splits a string; strip(): removes leading/trailing whitespace
for line in f.read().split(';'):
    # with maxsplit=1, split cuts the string into exactly two parts
    name, value = line.strip().split('=', 1)
    # add the pair to the cookies dict
    cookies[name] = value
r = requests.get('http://www.baidu.com', cookies=cookies)
data = r.text
f1 = open('baidu.html', 'w', encoding='utf-8')
f1.write(data)
f1.close()
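If parsing the cookie string feels like too much work, it can also be sent verbatim as a Cookie request header; a minimal sketch (the cookie value below is a made-up placeholder that you would copy from the browser's developer tools):

import requests

headers = {
    'User-Agent': 'Mozilla/5.0',
    # placeholder value; copy the real string from the browser's developer tools
    'Cookie': 'BAIDUID=xxxx; BIDUPSID=yyyy'
}
r = requests.get('http://www.baidu.com', headers=headers)
print(r.status_code)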
# Set a proxy (free proxy IPs can be found on proxy-listing sites)
# Works around sites that block your IP
import requests

proxies = {
    # 'scheme': 'ip:port'
    'http': '222.83.160.37:61205'  # the scheme key should be lowercase so it matches http:// URLs
}
req = requests.get('http://www.taobao.com/', proxies=proxies)
print(req.text)

# Set a timeout
import requests
from requests.exceptions import Timeout

try:
    response = requests.get('http://www.ibeifeng.com/', timeout=0.01)
    print(response.status_code)
except Timeout:
    print('Request timed out!')
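In practice the proxy, headers, and timeout are usually combined, and requests.exceptions.RequestException catches every error the library can raise (timeouts, connection errors, proxy failures) in a single except clause. A minimal sketch; the proxy address is the sample one above and may well be dead:

import requests
from requests.exceptions import RequestException

proxies = {'http': '222.83.160.37:61205'}  # the sample proxy from above, probably stale
headers = {'User-Agent': 'Mozilla/5.0'}

try:
    r = requests.get('http://www.taobao.com/', headers=headers,
                     proxies=proxies, timeout=5)
    print(r.status_code)
except RequestException as e:  # base class of all requests exceptions
    print('request failed:', e)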
3. Parsing with BeautifulSoup4 (bs4)
from bs4 import BeautifulSoup

html = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Create a BeautifulSoup object
soup = BeautifulSoup(html, 'html.parser')  # html.parser is the built-in parser
print(type(soup))
# Pretty-print the document
print(soup.prettify())

# 1. Get a tag (only the first matching tag is returned)
print(soup.p)      # the first p tag
print(soup.a)      # the first a tag
print(soup.title)  # the title tag
# 2. Get tag text
print(soup.title.string)
print(soup.a.string)
print(soup.body.string)  # returns None when the tag has more than one child
print(soup.head.string)  # with a single child, returns that child's text
# 3. Get attributes
print(soup.a.attrs)  # returns a dict
print(soup.a['id'])  # the value of one attribute
# 4. Child nodes
print(soup.p.contents)  # list of the tag's children
print(soup.p.children)  # iterator over the tag's children
# 5. Parent nodes
print(soup.p.parent)   # the p tag's parent and everything inside it
print(soup.p.parents)  # iterator over the p tag's ancestors
# 6. Sibling nodes (nodes at the same level)
# next_sibling and previous_sibling return the next / previous sibling
print(soup.a.next_sibling)
print(soup.a.previous_sibling)

# II. Searching the document tree
# 1. By tag name
# find all a tags
res1 = soup.find_all('a')
print(res1)
# find all a tags whose class attribute is "sister"
# (class is a reserved word in Python, so the keyword argument is spelled class_)
print(soup.find_all('a', class_="sister"))
# 2. By regular expression
import re
# find all tags whose name contains the letter d
res2 = soup.find_all(re.compile('d+'))
print(res2)
# 3. By list
# find all title tags and a tags
res3 = soup.find_all(['title', 'a'])
print(res3)
# 4. By keyword argument
# find the tag with id='link1'
res4 = soup.find_all(id='link1')
print(res4)
# 5. By text content
res5 = soup.find_all(text='Tillie')                 # exact text match
res55 = soup.find_all(text=re.compile('Dormouse'))  # regular-expression text match
print(res55)
# 6. Nested search
print(soup.find_all('p'))
# find all a tags inside each p tag
for i in soup.find_all('p'):
    print(i.find_all('a'))

# III. CSS selectors
# 1. Select by tag name
res6 = soup.select('a')  # returns a list
print(res6)              # all the a tags
# 2. Select by id (use #)
print(soup.select('#link2'))
# 3. Select by class (use .)
print(soup.select('.sister'))
print(soup.select('.sister')[2].get_text())  # get the text content
# 4. Select by attribute (a tags with the given href value)
print(soup.select('a[href="http://example.com/elsie"]'))
# 5. Descendant selection (a#link1 inside a p tag)
print(soup.select('p a#link1'))
# 6. Grouped selection
print(soup.select('a#link1,a#link2'))
# 7. Get the text of the selected tags
res7 = soup.select('p a.sister')
for i in res7:
    print(i.get_text())
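A very common task is pulling every link's text and href out of a page; a minimal sketch on a small sample document:

from bs4 import BeautifulSoup

sample_html = '<p><a href="http://example.com/elsie" id="link1">Elsie</a> and <a href="http://example.com/lacie" id="link2">Lacie</a></p>'
soup = BeautifulSoup(sample_html, 'html.parser')
for a in soup.find_all('a'):
    # a.get('href') returns None instead of raising if the attribute is missing
    print(a.get_text(), '->', a.get('href'))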
# Exercise: scrape the 12 job listings on the 51job home page
from bs4 import BeautifulSoup
import requests

url = 'https://www.51job.com/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
html = requests.get(url, headers=headers)
data = html.content.decode('gbk')  # the page is gbk-encoded
soup = BeautifulSoup(data, 'html.parser')
# find_all: span tags with class="at"
span = soup.find_all('span', class_="at")
# for i in span:
#     print(i.get_text())
# The same with select (CSS selector)
span1 = soup.select('span[class="at"]')
for m in span1:
    print(m.get_text())
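If the scraped results need to be kept, the standard library's csv module can write them to a file. A minimal sketch, with a stand-in list in place of the tags selected above:

import csv

# stand-in for the scraped results; in the exercise above this would be
# [m.get_text() for m in span1]
jobs = ['Python', 'Java', 'Web前端']
with open('jobs.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['job'])  # header row
    writer.writerows([job] for job in jobs)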
4. XPath syntax
XPath is a language for finding information in XML documents.
XPath can be used to traverse elements and attributes in an XML document.
from lxml import etree

text = '''
<html>
<head>
<title>春晚</title>
</head>
<body>
<h1 name="title">个人简介</h1>
<div name="desc">
<p name="name">姓名:<span>岳云鹏</span></p>
<p name="addr">住址:中国 河南</p>
<p name="info">代表作:五环之歌</p>
</div>
'''
# Initialise; etree.HTML builds the tree and fills in the missing closing tags
html = etree.HTML(text)
# result = etree.tostring(html)  # serialise back to bytes
# print(result.decode('utf-8'))

# Find all p tags
p_x = html.xpath('//p')
print(p_x)
# Print each p tag's text; .text only returns the tag's own text, not its children's
for i in p_x:
    print(i.text)  # note that the <span> text is missing
# Better: string(.) returns all the text inside the tag, children included
for i in p_x:
    print(i.xpath('string(.)'))
# Get the value of every name attribute
attr_name = html.xpath('//@name')
print(attr_name)
# Get every tag that has a name attribute
attr_name1 = html.xpath('//*[@name]')
print(attr_name1)
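A few more XPath expressions come up constantly: attribute predicates, text() for text nodes, contains() for partial matches, and positional indexing. A minimal sketch reusing the html object parsed above:

# `html` here is the etree object parsed from `text` above
# attribute predicate: the p tag whose name attribute equals "addr"
print(html.xpath('//p[@name="addr"]/text()'))
# contains(): p tags whose name attribute contains the letter "a" (name="name" and name="addr")
print(html.xpath('//p[contains(@name, "a")]'))
# positional predicate: the direct text of the first p inside the div
print(html.xpath('//div/p[1]/text()'))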
This concludes this overview of four common basic crawling methods in Python.
Original article: https://blog.csdn.net/qq_35866846/article/details/107801812