# python 爬起点目录 (crawl the Qidian book catalogue)
#
# 时间: 2022-06-07 09:06:48
# 目标: 书名, 简介, 作者, 字数 (title, intro, author, word count)
# 首先确定源代码的列表 (first locate the entry list in the page source)
import random
import re
import time
import urllib.request

from bs4 import BeautifulSoup
# Runtime configuration read from stdin: output file path and number of
# catalogue pages to crawl.
load = input("路径:")
num = input("输入页数:")


def gethtml(url):
    """Download *url* and return its parsed BeautifulSoup tree.

    Args:
        url: absolute URL of a catalogue page.

    Returns:
        BeautifulSoup: parse tree of the response body, decoded as UTF-8.
    """
    # NOTE(review): no timeout or User-Agent header is set — the site may
    # reject or stall bare requests; confirm before relying on this.
    page = urllib.request.urlopen(url)
    try:
        html = page.read().decode('utf-8')
    finally:
        page.close()  # original leaked the HTTP response object
    return BeautifulSoup(html, 'html.parser')
def getbook(soup, load):
    """Extract up to 20 book entries from one listing page and append them to *load*.

    For each ``<li data-rid="1".."20">`` entry, regexes pull the 书名 (title),
    作者 (author), 简介 (intro) and 字数 (word count, second <span>), which are
    written to the file followed by a dashed separator.

    Args:
        soup: parsed listing page; only ``soup.find_all`` is used.
        load: path of the output file (opened in append mode, UTF-8).
    """
    # Compile the patterns once instead of on every loop iteration.
    title_re = re.compile(r'<h4><a .*?>(.*?)</a></h4>')
    author_re = re.compile(r'<a class="name" .*?>(.*?)</a>')
    intro_re = re.compile(r'<p class="intro">([\s\S]*?)</p>')
    span_re = re.compile(r'<span>(.*?)</span>')
    # Open the output file once (original reopened it for every field written);
    # explicit UTF-8 so Chinese text writes identically on every platform.
    with open(load, 'a', encoding='utf-8') as f:
        for i in range(1, 21):
            xl = str(soup.find_all("li", {"data-rid": str(i)}))
            titles = title_re.findall(xl)
            authors = author_re.findall(xl)
            intros = intro_re.findall(xl)
            spans = span_re.findall(xl)
            if not (titles and authors and intros and len(spans) > 1):
                # Entry missing or malformed on this page; the original
                # crashed here with IndexError.
                continue
            content = ["《" + titles[0] + "》", authors[0], intros[0], spans[1]]
            # Same byte layout as the original per-field writes.
            f.write("\n".join(content))
            f.write("\n\n----------------------------------------------------------------------\n\n")
def geturl(num):
    """Crawl *num* catalogue pages, scraping each one into the global *load* file.

    Args:
        num: page count as str or int; converted with ``int()``.
    """
    for page in range(1, int(num) + 1):
        url = "http://fin.qidian.com/?size=-1&sign=-1&tag=-1&chanId=-1&subCateId=-1&orderId=&update=-1&page=%d&month=-1&style=1&vip=0" % page
        soup = gethtml(url)
        getbook(soup, load)
        time.sleep(2.5)  # throttle between pages to avoid hammering the site


# Kick off the crawl immediately, as the original script did.
geturl(num)

# 实现 (implementation ends here)