# -*- coding: utf-8 -*-
import urllib
import urllib2
from bs4 import BeautifulSoup  # extract content from tags

# Open the page and fetch its source
x = 0
url = 'http://www.dbmeinv.com/?pager_offset=1'

def crawl(url):  # pick a name that explains itself
    global x
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    req = urllib2.Request(url, headers=headers)  # disguise the request as a browser
    page = urllib2.urlopen(req, timeout=20)  # open the page
    contents = page.read()  # fetch the source
    # print contents
    # html.parser is the built-in parser; lxml is more powerful
    soup = BeautifulSoup(contents, 'html.parser')  # create a soup object
    my_girl = soup.find_all('img')  # find all <img> tags
    print(my_girl)
    for girl in my_girl:  # iterate over the list and read attributes
        link = girl.get('src')  # get the src image URL
        print(link)
        # name the downloaded file (the image/ directory must already exist)
        urllib.urlretrieve(link, 'image/%s.jpg' % x)
        x += 1

crawl(url)
The code above targets Python 2. Under Python 3.5, running the following code successfully crawls each image link:
# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup  # extract content from tags

# Open the page and fetch its source
url = 'http://www.dbmeinv.com/?pager_offset=1'

def crawl(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    req = urllib.request.Request(url, headers=headers)  # disguise the request as a browser
    page = urllib.request.urlopen(req, timeout=20)  # open the page
    contents = page.read()  # fetch the source
    soup = BeautifulSoup(contents, 'html.parser')  # create a soup object
    my_girl = soup.find_all('img')  # find all <img> tags
    print(my_girl)
    for girl in my_girl:
        link = girl.get('src')
        print(link)

crawl(url)
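Note that the Python 3 version above only prints the image URLs; it drops the download step from the Python 2 version. Here is a minimal sketch of restoring it, assuming the same URL and filename pattern as above and using urllib.request.urlretrieve (the Python 3 home of Python 2's urllib.urlretrieve):

# -*- coding: utf-8 -*-
import os
import urllib.request
from bs4 import BeautifulSoup

url = 'http://www.dbmeinv.com/?pager_offset=1'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}

req = urllib.request.Request(url, headers=headers)
contents = urllib.request.urlopen(req, timeout=20).read()
soup = BeautifulSoup(contents, 'html.parser')

os.makedirs('image', exist_ok=True)  # make sure the target directory exists
x = 0
for girl in soup.find_all('img'):
    link = girl.get('src')
    if link:  # skip <img> tags without a src attribute
        urllib.request.urlretrieve(link, 'image/%s.jpg' % x)
        x += 1

The os.makedirs call avoids the "No such file or directory" error that urlretrieve raises when the image/ directory does not exist.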