1. Third-party libraries used
requests — HTTP requests
BeautifulSoup — HTML parsing (from the bs4 package)
wordcloud — word-cloud generation
jieba — Chinese word segmentation (see the short sketch after this list)
matplotlib — plotting
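Hot-search titles are written without spaces between words, so jieba has to split them into tokens before anything can be counted. A minimal sketch of the call used below (the headline is a made-up example; the exact split depends on jieba's built-in dictionary):

import jieba

# jieba.cut returns a generator of tokens; wrap it in list() to inspect it
title = '春节档电影票房创新高'  # made-up sample headline
print(list(jieba.cut(title)))
# e.g. ['春节', '档', '电影', '票房', '创新', '高'], depending on jieba's dictionary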
2. Code implementation
import requests
import wordcloud
import jieba
from bs4 import BeautifulSoup
from matplotlib import pyplot as plt
from pylab import mpl

# Use a Chinese-capable font so matplotlib labels render correctly
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False

url = 'https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
try:
    # Fetch the hot-search page (s.weibo.com may reject bare requests;
    # if so, pass a browser-like User-Agent via the headers argument)
    r = requests.get(url)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, 'html.parser')

    # Collect the text of every link, then slice off the navigation
    # links at the head and the footer links at the tail
    data = soup.find_all('a')
    d_list = []
    for item in data:
        d_list.append(item.text)
    words = d_list[4:-11]

    # Chinese word segmentation
    result = []
    for word in words:
        result.extend(jieba.cut(word))

    # Drop single-character tokens, which carry little meaning on their own
    redata = []
    for it in result:
        if len(it) > 1:
            redata.append(it)
    result_str = ' '.join(redata)

    # Render the word cloud (font_path must point to a Chinese font)
    font = r'C:\Windows\Fonts\simhei.ttf'
    w = wordcloud.WordCloud(font_path=font, width=600, height=400)
    w.generate(result_str)
    w.to_file('微博热搜关键词词云.png')

    # Keep only keywords that appear more than once
    key = list(set(redata))
    x, y = [], []
    for st in key:
        count = redata.count(st)
        if count > 1:
            x.append(st)
            y.append(count)

    # Sort the keyword/count pairs together by count, so each
    # keyword stays aligned with its own frequency
    pairs = sorted(zip(x, y), key=lambda p: p[1])
    x = [p[0] for p in pairs]
    y = [p[1] for p in pairs]

    # Plot keyword frequencies
    plt.plot(x, y)
    plt.show()
except Exception as e:
    print(e)
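One design note: the filtering loop above calls redata.count(st) once per distinct keyword, rescanning the whole token list each time. The standard library's collections.Counter does the same tally in a single pass; a minimal sketch (the sample token list is made up):

from collections import Counter

def keyword_counts(tokens):
    # Tally all tokens in one pass instead of calling tokens.count() per keyword
    counts = Counter(tokens)
    # Keep tokens seen more than once, most frequent first
    return [(word, n) for word, n in counts.most_common() if n > 1]

print(keyword_counts(['疫苗', '春晚', '疫苗', '电影', '春晚', '疫苗']))
# [('疫苗', 3), ('春晚', 2)]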
3. Running results
When the script runs, it saves the word cloud to 微博热搜关键词词云.png in the working directory and opens the keyword-frequency plot in a matplotlib window.
This concludes this article on analyzing Weibo hot-search keywords with a Python crawler. For more on Python crawlers and Weibo hot searches, search 服务器之家's earlier articles or browse the related articles below. We hope you will continue to support 服务器之家!
原文链接:https://blog.csdn.net/weixin_45014413/article/details/113854155