Python web scraping: using bs4 to scrape housing listings from Lianjia

Date: 2024-03-14 21:45:53

1. First, a look at the results

[Screenshots: samples of the scraped listing data]

2. Go to the Lianjia site. Here I chose the city of Haikou (https://hk.lianjia.com/ershoufang/).

[Screenshot: the Lianjia second-hand listings page for Haikou]

3. Look at the page structure first. Each listing's information sits in an li tag, and those li tags live inside a ul tag, so the approach is clear; a minimal sketch follows the screenshot.

[Screenshot: DevTools view of the ul.sellListContent / li.LOGCLICKDATA structure]
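To make that structure concrete, here is a small self-contained sketch. The HTML sample is invented for illustration, but it mirrors the class names (sellListContent, LOGCLICKDATA) that the selectors below rely on:

from bs4 import BeautifulSoup

# invented miniature sample that mimics the real page's structure
html = '''
<ul class="sellListContent">
  <li class="clear LOGCLICKDATA">
    <div class="title"><a href="#">Listing title 1</a></div>
  </li>
  <li class="clear LOGCLICKDATA">
    <div class="title"><a href="#">Listing title 2</a></div>
  </li>
</ul>
'''

soup = BeautifulSoup(html, 'lxml')
# same selector as the real code: every LOGCLICKDATA li under sellListContent
for li in soup.select('.sellListContent li.LOGCLICKDATA'):
    print(li.select('div.title a')[0].string)
# prints: Listing title 1, then Listing title 2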

4. The code is below. As for the URL, flip through a couple of pages yourself and the pattern jumps out, so I won't dwell on it; a short sketch of the pattern follows the snippet.

url = 'https://hk.lianjia.com/ershoufang/pg{}/'.format(page)
# request the url (timeout so a stalled request can't hang the run)
resp = requests.get(url, headers=headers, timeout=10)
# convert the response body into a BeautifulSoup object
soup = BeautifulSoup(resp.content, 'lxml')
# select all the li tags (one per listing)
sellListContent = soup.select('.sellListContent li.LOGCLICKDATA')
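The pagination rule, for reference: the page number is the only thing that changes, slotted into the pg{} segment of the path.

# page N of the Haikou second-hand listings lives at .../ershoufang/pgN/
for page in range(1, 4):
    print('https://hk.lianjia.com/ershoufang/pg{}/'.format(page))
# https://hk.lianjia.com/ershoufang/pg1/
# https://hk.lianjia.com/ershoufang/pg2/
# https://hk.lianjia.com/ershoufang/pg3/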

5. Now look at the structure of the information we want to extract

[Screenshot: DevTools view of the houseInfo, positionInfo, totalPrice and unitPrice divs inside one li]

6. The extraction code is attached below; it does a bit of data cleaning along the way, which you'll appreciate once you scrape the page yourself. One note: stripped_strings returns a generator, not a list, so it has to be converted with list() before it can be indexed.
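A quick illustration of that conversion; the HTML sample here is invented, but shaped like the real houseInfo div:

from bs4 import BeautifulSoup

html = '<div class="houseInfo"><a href="#">Some Estate</a> | 2室1厅 | 89平米</div>'
soup = BeautifulSoup(html, 'lxml')

strings = soup.select('div.houseInfo')[0].stripped_strings
print(strings)        # a generator object; it cannot be indexed directly
houseInfo = list(strings)
print(houseInfo[0])   # Some Estate
print(houseInfo[1])   # | 2室1厅 | 89平米

With that in mind, the extraction loop (excerpted from the full code):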

# loop over every listing on the page
for sell in sellListContent:
    try:
        # listing title
        title = sell.select('div.title a')[0].string
        # grab all the text in the houseInfo div first, then pick it apart
        houseInfo = list(sell.select('div.houseInfo')[0].stripped_strings)
        # estate (楼盘) name
        loupan = houseInfo[0]
        # split the remaining string on '|'
        info = houseInfo[1].split('|')
        # layout, e.g. 2室1厅
        house_type = info[1].strip()
        # floor area
        area = info[2].strip()
        # orientation
        toward = info[3].strip()
        # renovation type
        renovation = info[4].strip()
        # location
        positionInfo = ''.join(sell.select('div.positionInfo')[0].stripped_strings)
        # total price
        totalPrice = ''.join(sell.select('div.totalPrice')[0].stripped_strings)
        # unit price
        unitPrice = list(sell.select('div.unitPrice')[0].stripped_strings)[0]

        # collect everything into a dict
        data_dict = {}
        data_dict['title'] = title
        data_dict['loupan'] = loupan
        data_dict['house_type'] = house_type
        data_dict['area'] = area
        data_dict['toward'] = toward
        data_dict['renovation'] = renovation
        data_dict['positionInfo'] = positionInfo
        data_dict['totalPrice'] = totalPrice
        data_dict['unitPrice'] = unitPrice

        data_list.append(data_dict)
        print(data_dict)
    except Exception:
        # skip any listing that doesn't match the expected layout
        continue
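The cleaning mostly amounts to that split('|') plus strip(). For illustration, assuming the houseInfo text takes the shape it had on the real page (this sample string is invented):

# invented sample in the shape of Lianjia's houseInfo text
raw = '| 2室1厅 | 89.2平米 | 南 | 精装 | 低楼层(共20层)'
info = raw.split('|')
print(info[1].strip())   # 2室1厅    (layout)
print(info[2].strip())   # 89.2平米  (area)
print(info[3].strip())   # 南        (orientation)
print(info[4].strip())   # 精装      (renovation)

Note that info[0] is the empty string before the leading '|', which is why the indexing starts at 1.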

7. The complete code

import requests
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import json
import csv
import time


# build the request headers with a random Chrome user agent
ua = UserAgent()
headers = {
    'user-agent': ua.chrome
}

# a list to hold the scraped records
data_list = []


def start_spider(page):
    url = 'https://hk.lianjia.com/ershoufang/pg{}/'.format(page)
    # request the url (timeout so a stalled request can't hang the run)
    resp = requests.get(url, headers=headers, timeout=10)
    # convert the response body into a BeautifulSoup object
    soup = BeautifulSoup(resp.content, 'lxml')
    # select all the li tags (one per listing)
    sellListContent = soup.select('.sellListContent li.LOGCLICKDATA')
    # loop over every listing on the page
    for sell in sellListContent:
        try:
            # listing title
            title = sell.select('div.title a')[0].string
            # grab all the text in the houseInfo div first, then pick it apart
            houseInfo = list(sell.select('div.houseInfo')[0].stripped_strings)
            # estate (楼盘) name
            loupan = houseInfo[0]
            # split the remaining string on '|'
            info = houseInfo[1].split('|')
            # layout, e.g. 2室1厅
            house_type = info[1].strip()
            # floor area
            area = info[2].strip()
            # orientation
            toward = info[3].strip()
            # renovation type
            renovation = info[4].strip()
            # location
            positionInfo = ''.join(sell.select('div.positionInfo')[0].stripped_strings)
            # total price
            totalPrice = ''.join(sell.select('div.totalPrice')[0].stripped_strings)
            # unit price
            unitPrice = list(sell.select('div.unitPrice')[0].stripped_strings)[0]

            # collect everything into a dict
            data_dict = {}
            data_dict['title'] = title
            data_dict['loupan'] = loupan
            data_dict['house_type'] = house_type
            data_dict['area'] = area
            data_dict['toward'] = toward
            data_dict['renovation'] = renovation
            data_dict['positionInfo'] = positionInfo
            data_dict['totalPrice'] = totalPrice
            data_dict['unitPrice'] = unitPrice

            data_list.append(data_dict)
            print(data_dict)
        except Exception:
            # skip any listing that doesn't match the expected layout
            continue


def main():

    # scrape the first 10 pages only
    for page in range(1, 11):
        start_spider(page)
        time.sleep(3)

    # write the data to a json file ('w' so reruns don't append invalid json)
    with open('data_json.json', 'w', encoding='utf-8') as f:
        json.dump(data_list, f, ensure_ascii=False, indent=4)
    print('json file written')

    # write the data to a csv file
    with open('data_csv.csv', 'w', encoding='utf-8', newline='') as f:
        # the header row comes from the dict keys
        fieldnames = data_list[0].keys()
        # create the writer object
        writer = csv.DictWriter(f, fieldnames)
        # write the header
        writer.writeheader()
        # write all rows in one go
        writer.writerows(data_list)
    print('csv file written')


if __name__ == '__main__':

    main()
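To sanity-check the two output files, they can be read straight back with the standard library; a minimal sketch:

import csv
import json

# reload the json file and count the records
with open('data_json.json', encoding='utf-8') as f:
    records = json.load(f)
print(len(records), 'records in json')

# reload the csv file and show the first row as a dict
with open('data_csv.csv', encoding='utf-8', newline='') as f:
    reader = csv.DictReader(f)
    first_row = next(reader)
print(first_row)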