用python爬取网页表格数据,供大家参考,具体内容如下
from bs4 import BeautifulSoup
import requests
import csv
import bs4
#检查url地址
def check_link(url):
    """Fetch *url* and return the decoded response body as text.

    Returns None (after printing an error message) when the request
    fails, so callers must check for a None result before parsing.
    """
    try:
        # A timeout prevents the script from hanging forever on a
        # dead server; the original call had no timeout at all.
        r = requests.get(url, timeout=10)
        # Surface 4xx/5xx responses as exceptions instead of silently
        # treating an error page as table data.
        r.raise_for_status()
        # Guess the real encoding from the body; many Chinese pages
        # mis-declare (or omit) the charset in their headers.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        print('无法链接服务器!!!')
#爬取资源
def get_contents(ulist, rurl):
    """Parse the HTML in *rurl* and append one list per table row to *ulist*.

    NOTE: each row is built by iterating the <tr> tag's direct children,
    which includes the whitespace text nodes between <td> tags, so the
    real cell values land at the odd indices (1, 3, 5, ...) — the layout
    that save_contents relies on. Do not switch to find_all('td') without
    also changing the indices there.
    """
    soup = BeautifulSoup(rurl, 'lxml')
    for row in soup.find_all('tr'):
        # .string is None for tags with nested markup and for the row
        # separators; those entries are kept to preserve the indexing.
        ulist.append([cell.string for cell in row])
#保存资源
def save_contents(urlist, path="D:/2016年中国企业500强排行榜.csv"):
    """Write the scraped ranking rows to a CSV file.

    Indices 1, 3 and 5 of each row hold the actual cell values; the even
    indices contain whitespace text nodes produced while parsing the
    HTML table, so they are skipped here.

    :param urlist: list of row lists as produced by get_contents
    :param path:   output file path (defaults to the original location,
                   so existing callers are unaffected)
    """
    # newline='' stops the csv module from emitting blank lines between
    # rows on Windows; utf-8-sig adds a BOM so Excel detects the
    # encoding of the Chinese text correctly.
    with open(path, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['2016年中国企业500强排行榜'])
        for row in urlist:
            writer.writerow([row[1], row[3], row[5]])
def main():
    """Download the ranking page, parse its table and save it as CSV."""
    rows = []
    url = "http://www.maigoo.com/news/463071.html"
    html = check_link(url)
    # check_link returns None when the request fails; bail out instead
    # of passing None into BeautifulSoup and crashing.
    if html is not None:
        get_contents(rows, html)
        save_contents(rows)


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers a network
    # request and a disk write as a side effect.
    main()
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持服务器之家。
原文链接:http://blog.csdn.net/zhuxunyuoyi/article/details/75210705