Applying the Scrapy Crawler Framework ---- Collecting Second-Hand Housing Data from 房天下 (Fang.com)

Posted: 2023-03-09 00:01:23

To serve their users better, many e-commerce and fintech companies have crawler engineers collect, analyze, and integrate behavioral data, giving people richer reference points for their decisions and, in turn, shaping how they act. Scrapy is the mainstream framework in the crawling trade, and the 房天下 second-hand housing collector described here is built on top of it.

  Data source: 房天下 ---- nationwide second-hand listings
  Target fields: province name, city name, district name, listing title, residential complex, floor plan, orientation, floor, floor area, year built, unit price, listing URL

  Database design: four tables -- province, city, area, house
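
The post doesn't include the table definitions, so the sketch below is an assumption: a minimal MySQL schema inferred from the column names used in the pipeline further down. Every type and length here is a guess and may need adjusting.

import pymysql

# Hypothetical DDL inferred from the pipeline's INSERT statements below;
# all types and lengths are assumptions.
DDL = [
    '''create table if not exists home_province (
        id int primary key auto_increment,
        province_name varchar(32))''',
    '''create table if not exists home_city (
        id int primary key auto_increment,
        city_name varchar(32),
        province_id int)''',
    '''create table if not exists home_area (
        id int primary key auto_increment,
        area_name varchar(32),
        city_id int,
        province_id int)''',
    '''create table if not exists home_house (
        id int primary key auto_increment,
        title varchar(128),
        house_type varchar(32),
        floor varchar(32),
        orientation varchar(32),
        build_time varchar(32),
        house_name varchar(64),
        house_area varchar(32),
        per_price int,
        house_url varchar(255),
        area_id int,
        city_id int,
        province_id int)''',
]

con = pymysql.connect(user='root', passwd='', db='test', host='localhost',
                      port=3306, charset='utf8')
with con.cursor() as cursor:
    for statement in DDL:
        cursor.execute(statement)
con.commit()
con.close()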

  Spider demo:

Fetching province and city names and links

# Extract each province's name and the URL of every city in it
def mycity(self, response):
    # The province blocks live under the #c02 node
    links = response.css('#c02 > ul > li')
    for link in links:
        try:
            province_name = link.xpath('./strong/text()').extract_first()
            urllinks = link.xpath('./a')
            for urllink in urllinks:
                city_url = urllink.xpath('./@href').extract_first()
                # Drop a trailing slash so relative paths join cleanly later
                if city_url[-1] == '/':
                    city_url = city_url[:-1]
                # Carry the province name and city URL along in meta
                yield scrapy.Request(url=city_url,
                                     meta={'province_name': province_name, 'city_url': city_url},
                                     callback=self.area)
        except Exception:
            pass
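
The methods shown here are members of an ordinary Scrapy spider class, which the post never shows. A minimal skeleton might look like the following; the spider name and the start URL (a city-index page) are assumptions:

import scrapy

class FangSpider(scrapy.Spider):
    # Hypothetical skeleton -- the original post omits the class definition
    name = 'fang'  # run with: scrapy crawl fang
    # Assumed entry point: a page listing every city, grouped by province
    start_urls = ['https://esf.fang.com/newsecond/esfcities.aspx']

    def parse(self, response):
        # Hand the city-index page to mycity() for extraction
        yield from self.mycity(response)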


Fetching each district's link and info

# Extract the URL of each district within a city
def area(self, response):
    try:
        links = response.css('.qxName a')
        # Skip the first anchor (the catch-all district filter)
        for link in links[1:]:
            area_url = response.url + link.xpath('@href').extract_first()
            yield scrapy.Request(url=area_url, meta=response.meta, callback=self.page)
    except Exception:
        pass
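
Note that area() hands each district page to a page() callback that the post doesn't include. Presumably it walks the pagination and feeds every result page to houselist() below; a hypothetical sketch, with the next-page selector being an assumption:

def page(self, response):
    # Hypothetical pagination callback -- not shown in the original post
    # Parse all listings on the current page
    yield from self.houselist(response)
    # Follow the "下一页" (next page) link, if present, keeping the same meta
    next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
    if next_url:
        yield scrapy.Request(url=response.urljoin(next_url),
                             meta=response.meta,
                             callback=self.page)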


Fetching the details of each listing

def houselist(self, response):
    # The breadcrumb anchors hold the city and district names, each
    # suffixed with "二手房" (three characters), which is stripped below
    city_name = response.css('#list_D02_01 > a:nth-child(3)::text').extract_first()
    area_name = response.css('#list_D02_01 > a:nth-child(5)::text').extract_first()
    links = response.xpath('/html/body/div[3]/div[4]/div[5]/dl')
    for link in links:
        try:
            # Build a fresh dict per listing so earlier items aren't overwritten
            item = {}
            if city_name:
                item['city_name'] = city_name[:-3]
            if area_name:
                item['area_name'] = area_name[:-3]
            item['title'] = link.xpath('./dd/p[1]/a/text()').extract_first()
            house_info = link.xpath('./dd/p[2]/text()').extract()
            if house_info:
                item['province_name'] = response.meta['province_name']
                item['house_type'] = house_info[0].strip()
                item['floor'] = house_info[1].strip()
                item['orientation'] = house_info[2].strip()
                item['build_time'] = house_info[3].strip()[5:]  # drop the 5-character label prefix
                item['house_name'] = link.xpath('./dd/p[3]/a/span/text()').extract_first()
                item['house_area'] = link.xpath('./dd/div[2]/p[1]/text()').extract_first()
                # The unit-price text ends with a unit character, which is cut off
                item['per_price'] = int(link.xpath('./dd/div[3]/p[2]/text()').extract_first()[:-1])
                list_url = link.xpath('./dd/p[1]/a/@href').extract_first()
                item['house_url'] = response.meta['city_url'] + list_url
                yield item
        except Exception:
            pass

At this point you can run scrapy crawl followed by the spider name, and the crawler will pull the site's listings. But how do we actually use that data? That's where the pipeline comes in: it inserts each item into the database.
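
The pipeline only runs if it is registered in settings.py. Assuming the Scrapy project is named house (the module path is a guess), the registration would look like this:

# settings.py -- the module path 'house.pipelines' is an assumption
ITEM_PIPELINES = {
    'house.pipelines.HousePipeline': 300,  # priority 0-1000; lower runs first
}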

 Pipeline demo:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql


class HousePipeline(object):
    def open_spider(self, spider):
        self.con = pymysql.connect(user='root', passwd='', db='test', host='localhost',
                                   port=3306, charset='utf8')
        self.cursor = self.con.cursor(pymysql.cursors.DictCursor)

    def process_item(self, item, spider):
        # Insert into the province table, reusing the row if it already exists
        province_num = self.cursor.execute('select * from home_province where province_name=%s',
                                           (item['province_name'],))
        if province_num:
            province_id = self.cursor.fetchone()['id']
        else:
            sql = 'insert into home_province(province_name) values(%s)'
            self.cursor.execute(sql, (item['province_name'],))
            province_id = self.cursor.lastrowid
            self.con.commit()
        # Insert into the city table
        # (match on province_id too, so same-named cities in different provinces don't collide)
        city_num = self.cursor.execute('select * from home_city where city_name=%s and province_id=%s',
                                       (item['city_name'], province_id))
        if city_num:
            city_id = self.cursor.fetchone()['id']
        else:
            sql = 'insert into home_city(city_name,province_id) values(%s,%s)'
            self.cursor.execute(sql, (item['city_name'], province_id))
            city_id = self.cursor.lastrowid
            self.con.commit()
        # Insert into the area table
        # (match on city_id too, so same-named districts in different cities don't collide)
        area_num = self.cursor.execute('select * from home_area where area_name=%s and city_id=%s',
                                       (item['area_name'], city_id))
        if area_num:
            area_id = self.cursor.fetchone()['id']
        else:
            sql = 'insert into home_area(area_name,city_id,province_id) values(%s,%s,%s)'
            self.cursor.execute(sql, (item['area_name'], city_id, province_id))
            area_id = self.cursor.lastrowid
            self.con.commit()
        # Insert into the listing table, skipping listings already stored
        house_num = self.cursor.execute('select house_name from home_house where house_name=%s',
                                        (item['house_name'],))
        if not house_num:
            sql = ('insert into home_house(title,house_type,floor,orientation,build_time,'
                   'house_name,house_area,per_price,house_url,area_id,city_id,province_id) '
                   'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
            self.cursor.execute(sql, (
                item['title'], item['house_type'], item['floor'], item['orientation'],
                item['build_time'], item['house_name'], item['house_area'], item['per_price'],
                item['house_url'], area_id, city_id, province_id))
            self.con.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.con.close()
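
After a crawl it is worth sanity-checking that rows actually landed in all four tables. A quick check, reusing the same connection parameters as the pipeline above:

import pymysql

con = pymysql.connect(user='root', passwd='', db='test', host='localhost',
                      port=3306, charset='utf8')
with con.cursor() as cursor:
    for table in ('home_province', 'home_city', 'home_area', 'home_house'):
        # Table names cannot be bound as parameters, hence the string formatting
        cursor.execute('select count(*) from %s' % table)
        print(table, cursor.fetchone()[0])
con.close()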

A sample of the collected data:

[Screenshot: scraped listings stored in the database]