网络爬虫(又被称为网页蜘蛛,网络机器人,在FOAF社区中间,更经常的称为网页追逐者),是一种按照一定的规则,自动的抓取万维网信息的程序或者脚本。
下面有一个示例代码,分享给大家:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: spider_58center_sth.py
"""Crawl second-hand goods sale listings from 58.com (example: computers, Nanjing)."""
import time

import requests
from bs4 import BeautifulSoup

# Entry page: 58.com listing index to crawl item links from.
url_58 = 'http://nj.58.com/?PGTID=0d000000-0000-0c5c-ffba-71f8f3f7039e&ClickID=1'
def get_url_list(url):
    """Fetch the listing index page and return the item-detail URLs.

    Args:
        url: URL of the 58.com listing index page.

    Returns:
        A single newline-separated string of item URLs (starts with a
        leading '\\n'), the format consumed by get_url_info(). Links to
        zhuanzhuan.com and redirect ("jump") pages are skipped.
    """
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # Do not reassign the `url` parameter: keep the selector result separate.
    links = soup.select('td.t > a[class="t"]')
    url_list = ''
    for link in links:
        href = link.get('href')
        # Skip second-hand-platform (zhuanzhuan) and redirect (jump) links
        # in one condition instead of the original nested if/pass ladder.
        if 'zhuanzhuan' in href or 'jump' in href:
            continue
        url_list = url_list + '\n' + href
    print('url_list: %s' % url_list)
    return url_list  # newline-separated target URLs
def get_url_info():
    """Visit each item URL from get_url_list() and print a detail dict.

    For every listing page, extracts category, title, date, price,
    fineness (condition) and area via CSS selectors and prints one
    dict per item.
    """
    url_list = get_url_list(url_58)
    for url in url_list.split():
        time.sleep(1)  # be polite: throttle one request per second
        web_datas = requests.get(url)
        soup = BeautifulSoup(web_datas.text, 'lxml')
        # `category` instead of `type`: avoid shadowing the builtin.
        category = soup.select('#head > div.breadCrumb.f12 > span:nth-of-type(3) > a')
        title = soup.select(' div.col_sub.mainTitle > h1')
        date = soup.select('li.time')
        price = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.summary > ul > '
                            'li:nth-of-type(1) > div.su_con > span.price.c_f50')
        # BUG FIX: the original selectors used 'u1' (u + digit one) instead
        # of 'ul', so fineness/area could never match any element.
        fineness = soup.select('div.col_sub.summary > ul > li:nth-of-type(2) > div.su_con > span')
        area = soup.select('div.col_sub.summary > ul > li:nth-of-type(3) > div.su_con > span')
        # zip truncates to the shortest selector result, so a missing field
        # on the page silently drops the whole item (original behavior kept).
        for typei, titlei, datei, pricei, finenessi, areai in zip(category, title, date, price, fineness, area):
            data = {
                'type': typei.get_text(),
                'title': titlei.get_text(),
                'date': datei.get_text(),
                'price': pricei.get_text(),
                'fineness': finenessi.get_text().strip(),
                'area': list(areai.stripped_strings),
            }
            print(data)


# Guard the crawl so importing this module does not trigger network I/O.
if __name__ == '__main__':
    get_url_info()
|
爬取商城商品售卖信息
总结
以上就是本文关于Python探索之爬取电商售卖信息代码示例的全部内容,希望对大家有所帮助。如有不足之处,欢迎留言指出。感谢朋友们对本站的支持!
原文链接:http://www.cnblogs.com/DeRozan/p/7660686.html