How to scrape Weibo hot search data with Python and save it

Date: 2022-01-21 03:15:02

This mainly uses two libraries: requests and bs4 (BeautifulSoup).
The scraped results are saved to d://hotsearch.txt.
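For context on the parsing below: on the hot-search page (as structured at the time of writing), each entry is a <tr> row inside <tbody>, whose first <td> holds the rank, whose second <td> holds the topic link (an <a> tag) plus a heat-index <span>, and whose third <td> holds the tag. The code indexes these cells directly, so it will break if Weibo changes this layout.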

import requests
import bs4
from bs4 import BeautifulSoup

mylist = []
r = requests.get(url='https://s.weibo.com/top/summary?refer=top_hot&topnav=1&wvr=6', timeout=10)
print(r.status_code)  # print the HTTP status code
r.encoding = r.apparent_encoding
demo = r.text
soup = BeautifulSoup(demo, "html.parser")
for link in soup.find('tbody'):
    hotnumber = ''
    if isinstance(link, bs4.element.Tag):
        # print(link('td'))
        lis = link('td')
        hotrank = lis[1]('a')[0].string   # hot search topic title
        hotname = lis[1].find('span')     # heat-index <span> (absent for pinned entries)
        if isinstance(hotname, bs4.element.Tag):
            hotnumber = hotname.string    # hot search heat index
        mylist.append([lis[0].string, hotrank, hotnumber, lis[2].string])
f = open("d://hotsearch.txt", "w+")
for line in mylist:
    f.write('%s %s %s %s\n' % (line[0], line[1], line[2], line[3]))
f.close()  # flush and close the output file
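Note that s.weibo.com often rejects requests that do not look like a browser, and on some networks it redirects anonymous visitors to a login page. Here is a minimal, more defensive variant of the request above; the User-Agent string and the optional Cookie are placeholders you would fill in from your own browser, not values from the original article:

import requests

headers = {
    "User-Agent": "Mozilla/5.0",  # placeholder browser UA string
    # "Cookie": "...",            # fill in from a logged-in browser session if Weibo redirects to login
}
r = requests.get(
    "https://s.weibo.com/top/summary?refer=top_hot&topnav=1&wvr=6",
    headers=headers,
    timeout=10,
)
r.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page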


Extended topic: scraping Weibo hot searches with Python and analyzing the data

Scraping the Weibo hot search list

import time

import schedule
import pandas as pd
from datetime import datetime
import requests
from bs4 import BeautifulSoup

url = "https://s.weibo.com/top/summary?cate=realtimehot&sudaref=s.weibo.com&display=0&retcode=6102"
get_info_dict = {}
count = 0

def main():
    global url, get_info_dict, count
    get_info_list = []
    print("Scraping data...")
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'lxml')
    for tr in soup.find_all(name='tr', class_=''):
        get_info = get_info_dict.copy()
        get_info['title'] = tr.find(class_='td-02').find(name='a').text
        try:
            # eval converts the numeric heat text to an int
            get_info['num'] = eval(tr.find(class_='td-02').find(name='span').text)
        except AttributeError:  # pinned entries have no heat <span>
            get_info['num'] = None
        get_info['time'] = datetime.now().strftime("%Y/%m/%d %H:%M")
        get_info_list.append(get_info)
    get_info_list = get_info_list[1:16]  # keep the top 15, skipping the pinned entry
    df = pd.DataFrame(get_info_list)
    if count == 0:
        # first round: write with a header row
        df.to_csv('datas.csv', mode='a+', index=False, encoding='gbk')
        count += 1
    else:
        # later rounds: append rows only
        df.to_csv('datas.csv', mode='a+', index=False, header=False, encoding='gbk')

# scheduled scraping: run main() once per minute
schedule.every(1).minutes.do(main)

while True:
    schedule.run_pending()
    time.sleep(1)  # avoid a busy-wait loop
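After the scraper has run for a while, a quick sanity check confirms the CSV is accumulating correctly; this is a sketch assuming datas.csv was written by the script above. Note the gbk encoding: the analysis script below must read the file with the same encoding it was written with.

import pandas as pd

df = pd.read_csv('datas.csv', encoding='gbk')
print(df.head(15))           # the 15 entries from the first scrape round
print(df['time'].nunique())  # how many scrape rounds have been collected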

Data analysis with pyecharts

import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Bar, Timeline, Grid
from pyecharts.globals import ThemeType

df = pd.read_csv('datas.csv', encoding='gbk')
print(df)
t = Timeline(init_opts=opts.InitOpts(theme=ThemeType.MACARONS))  # set the theme
for i in range(int(df.shape[0] / 15)):
    bar = (
        Bar()
        .add_xaxis(list(df['title'][i*15: i*15+15][::-1]))        # x-axis data
        .add_yaxis('num', list(df['num'][i*15: i*15+15][::-1]))   # y-axis data
        .reversal_axis()  # flip to a horizontal bar chart
        .set_global_opts(  # global options
            title_opts=opts.TitleOpts(  # title options
                title=f"{list(df['time'])[i * 15]}",
                pos_right="5%", pos_bottom="15%",
                title_textstyle_opts=opts.TextStyleOpts(
                    font_family='KaiTi', font_size=24, color='#ff1493'
                )
            ),
            xaxis_opts=opts.AxisOpts(  # x-axis options
                splitline_opts=opts.SplitLineOpts(is_show=True),
            ),
            yaxis_opts=opts.AxisOpts(  # y-axis options
                splitline_opts=opts.SplitLineOpts(is_show=True),
                axislabel_opts=opts.LabelOpts(color='#dc143c')
            )
        )
        .set_series_opts(  # series options
            label_opts=opts.LabelOpts(  # label options
                position="right", color='#9400d3')
        )
    )
    grid = (
        Grid()
        .add(bar, grid_opts=opts.GridOpts(pos_left="24%"))
    )
    t.add(grid, "")

t.add_schema(
    play_interval=1000,       # carousel speed in milliseconds
    is_timeline_show=False,   # hide the timeline component
    is_auto_play=True,        # auto-play
)

t.render('时间轮播图.html')
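Each batch of 15 rows in datas.csv becomes one frame of the carousel, labeled with the scrape time from that round; opening the rendered HTML file in a browser plays the frames automatically, advancing every play_interval milliseconds.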

This concludes the article on how to scrape Weibo hot search data with Python and save it. For more on scraping Weibo hot search data with Python, search 服务器之家's earlier articles or browse the related articles below. We hope you will continue to support 服务器之家!

Original article: https://blog.csdn.net/naiue/article/details/106876989