Python version: Python 3.4
This was my first attempt at writing a crawler. Nothing fancy, just a simple crawl starting from Baidu.
It only uses the standard-library urllib modules, plus a data structure called deque from collections, which behaves like a queue but is a little different: it supports fast appends and pops at both ends.
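Before the crawler itself, here is a minimal sketch of how deque behaves (the URLs are made up for illustration). It is exactly the FIFO behavior the crawler needs, with O(1) operations at both ends:

from collections import deque

q = deque()
q.append('http://example.com/a')        # enqueue at the tail
q.append('http://example.com/b')
first = q.popleft()                     # dequeue from the head (FIFO)
print(first)                            # http://example.com/a
q.appendleft('http://example.com/c')    # unlike a plain list, the head end is also O(1)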
Code:
import re
import urllib.request
from collections import deque

queue = deque()    # URLs waiting to be crawled (FIFO)
visited = set()    # URLs that have already been crawled

url = 'http://www.baidu.com'
queue.append(url)
cnt = 0

while queue:
    url = queue.popleft()
    visited.add(url)
    print('climb num:' + str(cnt) + ' now climb ---> ' + str(url))
    cnt += 1

    try:
        urlop = urllib.request.urlopen(url)
    except Exception:
        continue    # skip URLs that fail to open

    # only parse responses that are actually HTML
    content_type = urlop.getheader('Content-Type')
    if content_type is None or 'html' not in content_type:
        continue

    try:
        data = urlop.read().decode('utf-8')
    except Exception:
        continue    # skip pages that are not valid UTF-8

    # extract every href="..." value and enqueue new absolute links
    linkre = re.compile(r'href="(.+?)"')
    for x in linkre.findall(data):
        if 'http' in x and x not in visited:
            queue.append(x)
            print('append in queue ---> ' + x)
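To make the link-extraction step concrete, here is a small standalone test of the same href regex against a made-up HTML fragment (the fragment and URLs are illustrative, not taken from Baidu's actual markup):

import re

html = '<a href="http://www.example.com/page1">p1</a> <a href="/relative/path">p2</a>'
linkre = re.compile(r'href="(.+?)"')
print(linkre.findall(html))
# ['http://www.example.com/page1', '/relative/path']

The 'http' in x check in the main loop then filters out relative links like the second one, so only absolute URLs get queued.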