get请求
简单使用
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
|
import requests

# Basic GET request: fetch the Baidu homepage.
# (Removed a no-op advertising string literal and commented-out code that
# cluttered the original snippet.)
response = requests.get("https://www.baidu.com/")

# response.text decodes with the encoding requests guesses (iso-8859-1 here),
# which can garble non-ASCII characters.  response.content is the raw bytes,
# so decode it explicitly with the page's real encoding instead.
print(response.content.decode('utf-8'))

# Other useful response attributes:
#   response.url          -> https://www.baidu.com/
#   response.status_code  -> 200
#   response.encoding     -> iso-8859-1
|
添加headers和params
1
2
3
4
5
6
7
8
9
10
11
12
13
14
|
import requests

# Query-string parameters appended to the URL by requests.
query = {
    'wd': 'python',
}

# Request headers; the user-agent makes the request look like a browser.
request_headers = {
    'user-agent': 'mozilla/5.0 (windows nt 6.1; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/64.0.3282.140 safari/537.36',
}

resp = requests.get("https://www.baidu.com/s", params=query, headers=request_headers)

# resp.content is raw bytes — decode explicitly before writing the page out.
with open('baidu.html', 'w', encoding='utf-8') as f:
    f.write(resp.content.decode('utf-8'))
|
post请求
爬取拉勾网职位信息
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
|
import requests

# Lagou job-search endpoint (city parameter is URL-encoded 北京).
url = "https://www.lagou.com/jobs/positionajax.json?city=%e5%8c%97%e4%ba%ac&needaddtionalresult=false"

# Form fields sent in the POST body.
form = {
    'first': 'true',
    'pn': 1,
    'kd': 'python',
}

# Headers forwarded with the request.
# NOTE(review): the referer presumably matters to this endpoint — verify.
request_headers = {
    "user-agent": "mozilla/5.0 (windows nt 6.1; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/64.0.3282.140 safari/537.36",
    "referer": "https://www.lagou.com/jobs/list_python?city=%e5%8c%97%e4%ba%ac&cl=false&fromsearch=true&labelwords=&suginput=",
}

resp = requests.post(url, data=form, headers=request_headers)

print(type(resp.text))    # <class 'str'>
print(type(resp.json()))  # <class 'dict'>
print(resp.json())        # body parsed from JSON into a dict
|
使用代理
1
2
3
4
5
6
7
|
import requests

# BUG FIX: the original address '115.210.31.236.55:9000' had five octets and
# is not a valid host.  Also, the request below uses HTTPS, so a proxies dict
# that maps only the 'http' scheme would never be consulted — map 'https' too.
proxy = {
    'http': 'http://115.210.31.236:9000',
    'https': 'http://115.210.31.236:9000',
}
response = requests.get("https://www.baidu.com/", proxies=proxy)
print(response.content.decode('utf-8'))
|
session登录
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
|
# _*_ coding:utf-8 _*_
import requests

# 1. Create a Session object so cookies persist across requests.
#    requests.Session() is the documented spelling; requests.session() is a
#    legacy alias that returns the same object.
ssion = requests.Session()

# 2. Headers to send with the login request (browser-like user-agent).
headers = {'user-agent': 'mozilla/5.0 (windows nt 6.1; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/60.0.3112.101 safari/537.36'}

# 3. Login credentials (placeholders — fill in real values).
data = {"email": "158xxxxxxxx", "password": "pythonxxxxxxx"}

# 4. POST the credentials; the session stores the login cookies it receives.
#    BUG FIX: the original built `headers` but never passed it to any request.
ssion.post("http://www.renren.com/plogin.do", data=data, headers=headers)

# 5. The stored cookies let us fetch pages that require a logged-in user.
response = ssion.get("http://zhibo.renren.com/news/108")

# 6. Show the response body.
print(response.text)
|
以上所述是小编给大家介绍的python爬虫基础教程:requests库(二)详解整合,希望对大家有所帮助,如果大家有任何疑问请给我留言,小编会及时回复大家的。在此也非常感谢大家对服务器之家网站的支持!
原文链接:https://blog.csdn.net/fei347795790/article/details/89153257