This article demonstrates how to use Python's Selenium module to drive a browser and scrape live-room information from Douyu. It is shared here for your reference; the details are as follows.
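The script relies on Selenium's explicit-wait pattern: WebDriverWait polls the page until an expected condition holds, which is far more reliable on a JavaScript-heavy page than fixed time.sleep() calls. Here is a minimal standalone sketch of that pattern (the selector and 10-second timeout mirror the full script below; PhantomJS can be swapped for any other driver):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.PhantomJS()
driver.get('https://www.douyu.com/directory/all')
# poll (for up to 10 s) until the room list exists in the DOM;
# raises TimeoutException if the condition is never met
rooms = WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located(
        (By.CSS_SELECTOR, 'ul#live-list-contentbox > li')))
print(len(rooms))
driver.quit()

The full script follows.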
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pymongo.errors import PyMongoError
# MongoDB configuration
MONGO_HOST = "localhost"
MONGO_DATABASE = "douyu"
MONGO_TABLE = "zhibo"
client = MongoClient(host=MONGO_HOST)
db = client[MONGO_DATABASE]
# PhantomJS command-line options
# see http://phantomjs.org/api/command-line.html
SERVICE_ARGS = ['--disk-cache=true', '--load-images=false']
# driver = webdriver.Chrome()  # with a visible browser window
driver = webdriver.PhantomJS(service_args=SERVICE_ARGS)  # headless
delay = 10
wait = WebDriverWait(driver, delay)
driver.maximize_window()
def get_total_pages():
    url = 'https://www.douyu.com/directory/all'
    driver.get(url)
    # the pager item right after the "..." dot holds the total page count
    pages = int(driver.find_element_by_css_selector(
        '.shark-pager-dot + .shark-pager-item').text)
    print("Fetching page 1")
    room_list = get_rooms_by_beautifulsoup()
    save_to_mongodb(room_list)
    return pages
# Fetch the page with the given number and save its rooms to the database
def parse_page(page_num):
    print("Fetching page %d" % page_num)
    try:
        page_num_box = wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "input.jumptxt")))
        go_btn = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'a.shark-pager-submit')))
        page_num_box.clear()
        page_num_box.send_keys(page_num)
        go_btn.click()
        # driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # time.sleep(0.1)
        # wait until the pager marks the requested page as current;
        # By.CLASS_NAME cannot match "shark-pager-item current" (compound
        # class names raise "invalid selector: Compound class names not
        # permitted"), so a CSS selector is used instead
        wait.until(
            EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, '.shark-pager-item.current'),
                str(page_num)))
        room_list = get_rooms_by_beautifulsoup()
        save_to_mongodb(room_list)
    except TimeoutException:
        print("Failed to fetch page %d" % page_num)
        print("Retrying page %d" % page_num)
        return parse_page(page_num)
# Parse the current page with bs4
def get_rooms_by_beautifulsoup():
    '''
    Parse the page source with the bs4 library.
    Yields each room's name, viewer count, tag and host name.
    '''
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, "ul#live-list-contentbox > li")))
    html = driver.page_source
    soup = BeautifulSoup(html, 'lxml')
    rooms = soup.select('ul#live-list-contentbox > li')
    for room in rooms:
        room_name = room.find(
            'h3', attrs={'class': 'ellipsis'}).get_text(strip=True)
        view_count = room.find('span', class_='dy-num fr').text
        tag = room.find('span', class_='tag ellipsis').text
        hostname = room.find('span', class_='dy-name ellipsis fl').text
        # print("room: " + room_name + "\tviewers: " + view_count + "\ttag: " + tag + "\thost: " + hostname)
        yield {
            'room_name': room_name,
            'view_count': view_count,
            'tag': tag,
            'hostname': hostname,
        }
def save_to_mongodb(room_list):
    for room in room_list:
        try:
            # insert_one replaces the deprecated Collection.insert()
            db[MONGO_TABLE].insert_one(room)
            print("Inserted into MongoDB:", room)
        except PyMongoError as e:
            print("MongoDB insert failed:", room, e)
if __name__ == '__main__':
    total_pages = 0  # defined up front so the finally block can print it even on early failure
    try:
        total_pages = get_total_pages()
        for page_num in range(2, total_pages + 1):
            parse_page(page_num)
    except Exception as e:
        print("Something went wrong:", e)
    finally:  # make sure the browser is closed properly
        print("%d pages in total" % total_pages)
        driver.close()
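After a run finishes, the scraped rooms can be checked directly in MongoDB. A short example, using the database and collection names from the script above:

from pymongo import MongoClient

collection = MongoClient(host='localhost')['douyu']['zhibo']
print(collection.count_documents({}))     # how many rooms were stored
for room in collection.find().limit(3):   # peek at a few documents
    print(room)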
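One caveat for readers trying this today: PhantomJS is no longer maintained, and recent Selenium releases have removed webdriver.PhantomJS entirely (Selenium 4 also replaces find_element_by_css_selector with driver.find_element(By.CSS_SELECTOR, ...)). Headless Chrome is the usual substitute; a minimal sketch under that assumption, where the image-disabling flag stands in for PhantomJS's --load-images=false:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')  # run without a visible window
options.add_argument('--blink-settings=imagesEnabled=false')  # skip images to speed up loads
driver = webdriver.Chrome(options=options)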
I hope this article is helpful to readers working on Python programming.
Original article (in Chinese): http://www.cnblogs.com/hupeng1234/p/7138551.html