Python爬虫-第三章-4-利用BeautifulSoup模块爬取某网壁纸图库图片

时间:2023-01-11 11:04:51

思路:

1.提取子页面链接

2.访问子链接页面,提取下载地址

3.访问下载地址下载内容到本地

# Demo Describe:数据解析 bs4

import time

import requests
import random
import string
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

picType = input('输入想要爬取的类型: ')
startUrl = f'https://www.3gbizhi.com/search/2-{picType}/'
ua = UserAgent()
user_agent = ua.random
headers = {
'user-agent': user_agent
}
soup = BeautifulSoup(requests.get(startUrl, headers=headers).content, 'html.parser')
pages = int(soup.find('div', class_='mtw cl').findAll('a', class_=None)[-1].text)
for page in range(1, pages):
print(f'抓取第{page}页:')
user_agent = ua.random
headers = {
'user-agent': user_agent
}
url = f'https://www.3gbizhi.com/search/2-{picType}/{page}.html'
resp = requests.get(url, headers=headers)
mainPage = BeautifulSoup(resp.text, 'html.parser')
obj = mainPage.find('div', class_='searchlistw') \
.find('ul', class_='cl') \
.findAll('a')
for i in obj:
href = i.get('href')
childResp = requests.get(href, headers=headers)
childPage = BeautifulSoup(childResp.text, 'html.parser')
picName = childPage.find('div', class_='showtitle') \
.find('h2').text.split(',')[0]
randomstr = ''.join(random.choices(string.ascii_letters + string.digits, k=3))
picName += randomstr
picSrc = childPage.find('a', class_='bz_size_show').get('href')
# pictrue download
picResp = requests.get(picSrc, headers=headers)
with open(f'../FileForDemo/Pic3gbizhi/{picName}.jpg', mode='wb') as file:
file.write(picResp.content)

picResp.close()
childResp.close()
print(picName, 'is down load over!')
resp.close()
time.sleep(2)
print('All Over!!!')

我是moore,大家一起加油!!!