This article walks through a Python example that scrapes data from the web and writes it to, and appends it to, an Excel file. It is shared here for reference; the details are as follows:
The example scrapes the hot posts page of Qiushibaike (糗事百科).
Install the Excel read/write dependency: pip install xlwt
Install the dependency for appending to an existing Excel file: pip install xlutils
Install the HTML parsing dependency: pip install lxml
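Before the full example, here is a minimal sketch of what these two Excel libraries are used for (the file name demo.xls and the column values are only illustrations, not part of the original script): xlwt creates a new .xls workbook, and xlutils.copy turns an xlrd-opened workbook into a writable copy so new rows can be appended after the existing ones.

import xlwt
import xlrd
from xlutils.copy import copy

# Create a new .xls workbook and write a header row (demo.xls is an example name).
book = xlwt.Workbook(encoding='utf-8')
sheet = book.add_sheet('demo')
for col, title in enumerate(['author', 'content']):
    sheet.write(0, col, title)
book.save('demo.xls')

# Re-open it, copy it into a writable workbook, and append below the last existing row.
rb = xlrd.open_workbook('demo.xls', formatting_info=True)
next_row = rb.sheets()[0].nrows  # rows already present, header included
wb = copy(rb)
wb.get_sheet(0).write(next_row, 0, 'new author')
wb.get_sheet(0).write(next_row, 1, 'new content')
wb.save('demo.xls')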
Python example:
import csv  # only needed if the commented-out CSV output below is enabled
import requests
from lxml import etree
import time
import xlwt
import os
from xlutils.copy import copy
import xlrd

data_infos_list = []
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
# f = open('C:\\Users\\Administrator\\Desktop\\qiubaibook.csv', 'a+', newline='', encoding='utf-8')
# writer = csv.writer(f)
# writer.writerow(('author', 'sex', 'rank', 'content', 'great', 'comment', 'time'))
filename = 'C:\\Users\\Administrator\\Desktop\\qiubaibook.xls'

def get_info(url):
    res = requests.get(url, headers=headers)
    selector = etree.HTML(res.text)
    # print(res.text)
    htmls = selector.xpath('//div[contains(@class,"article block untagged mb15")]')
    # //*[@id="qiushi_tag_120024357"]/a[1]/div/span      content
    # //*[@id="qiushi_tag_120024357"]/div[2]/span[1]/i   laughs
    # //*[@id="c-120024357"]/i                           comments
    # //*[@id="qiushi_tag_120024357"]/div[1]/a[2]/h2     author
    # //*[@id="qiushi_tag_120024357"]/div[1]/div         rank
    # womenIcon / manIcon                                gender
    for html in htmls:
        author = html.xpath('div[1]/a[2]/h2/text()')
        if len(author) == 0:
            author = html.xpath('div[1]/span[2]/h2/text()')
        rank = html.xpath('div[1]/div/text()')
        sex = html.xpath('div[1]/div/@class')
        if len(sex) == 0:
            sex = '未知'
        elif 'manIcon' in sex[0]:
            sex = '男'
        elif 'womenIcon' in sex[0]:
            sex = '女'
        if len(rank) == 0:
            rank = ['-1']  # keep it a list so rank[0] below still works
        contents = html.xpath('a[1]/div/span/text()')
        great = html.xpath('div[2]/span[1]/i/text()')  # //*[@id="qiushi_tag_112746244"]/div[3]/span[1]/i
        if len(great) == 0:
            great = html.xpath('div[3]/span[1]/i/text()')
        comment = html.xpath('div[2]/span[2]/a/i/text()')  # //*[@id="c-112746244"]/i
        if len(comment) == 0:
            comment = html.xpath('div[3]/span[2]/a/i/text()')
        # classes = html.xpath('a[1]/@class')
        # writer.writerow((author[0].strip(), sex, rank[0].strip(), contents[0].strip(), great[0].strip(),
        #                  comment[0].strip(), time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
        data_infos = [author[0].strip(), sex, rank[0].strip(), contents[0].strip(), great[0].strip(),
                      comment[0].strip(), time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))]
        data_infos_list.append(data_infos)

def write_data(sheet, row):
    for data_infos in data_infos_list:
        j = 0
        for data in data_infos:
            sheet.write(row, j, data)
            j += 1
        row += 1

if __name__ == '__main__':
    urls = ['https://www.qiushibaike.com/8hr/page/{}/'.format(num) for num in range(1, 14)]
    for url in urls:
        print(url)
        get_info(url)
        time.sleep(2)
    # If the file already exists, append to it; otherwise create a new one
    if os.path.exists(filename):
        # Open the existing Excel file
        rb = xlrd.open_workbook(filename, formatting_info=True)  # formatting_info=True keeps fonts, colors and other styles
        # Use xlrd to get the number of rows already written
        rn = rb.sheets()[0].nrows
        # Copy the workbook into a writable one
        wb = copy(rb)
        # Get the first sheet from the copied workbook
        sheet = wb.get_sheet(0)
        # Write the new rows into the sheet, starting after the existing rows
        write_data(sheet, rn)
        # Delete the original file
        os.remove(filename)
        # Save
        wb.save(filename)
    else:
        header = ['author', 'sex', 'rank', 'content', 'great', 'comment', 'time']
        book = xlwt.Workbook(encoding='utf-8')
        sheet = book.add_sheet('糗百')
        # Write the header row
        for h in range(len(header)):
            sheet.write(0, h, header[h])
        # Write the data rows below the header
        write_data(sheet, 1)
        book.save(filename)
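To confirm that repeated runs actually append rows rather than overwrite them, the generated workbook can be read back with xlrd (already a dependency of the script above). This check is only an illustration and is not part of the original code:

import xlrd

filename = 'C:\\Users\\Administrator\\Desktop\\qiubaibook.xls'  # same path as in the script above
rb = xlrd.open_workbook(filename)
sheet = rb.sheets()[0]
print('total rows (including the header):', sheet.nrows)
if sheet.nrows > 1:
    # Print the first data row as a quick sanity check.
    print(sheet.row_values(1))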
We hope this article is helpful for your Python programming.
Original article: https://blog.csdn.net/lA6Nf/article/details/79352112