Purpose:
To learn how Python multithreading works, and to compare multithreading efficiency on an IO-bound task: downloading 400 images.
import requests
import urlparse
import os
import time
import threading
import Queue

path = '/home/lidongwei/scrapy/owan_img_urls.txt'
#path = '/home/lidongwei/scrapy/cc.txt'
fetch_img_save_path = '/home/lidongwei/scrapy/owan_imgs/'

# Read the 400 URLs saved in the file
with open(path) as f:
    urls = f.readlines()
urls = urls[:400]

# Use a Queue for communication between threads; the queue is thread-safe
# (it already does its own locking internally)
q = Queue.Queue()
for url in urls:
    q.put(url)

start = time.time()

def fetch_img_func(q):
    while True:
        try:
            # Read from the queue without blocking
            url = q.get_nowait()
            i = q.qsize()
        except Exception as e:
            # Queue is exhausted; this worker exits
            print e
            break
        print 'Current Thread Name Running %s ...' % threading.currentThread().name
        url = url.strip()
        img_path = urlparse.urlparse(url).path
        ext = os.path.splitext(img_path)[1]
        print 'handle %s pic... pic url %s' % (i, url)
        res = requests.get(url, stream=True)
        if res.status_code == 200:
            save_img_path = '%s%s%s' % (fetch_img_save_path, i, ext)
            # Save the downloaded image to disk in chunks
            with open(save_img_path, 'wb') as fs:
                for chunk in res.iter_content(1024):
                    fs.write(chunk)
            print 'save %s pic' % i

# Start more threads to compare the effect of different thread counts
t1 = threading.Thread(target=fetch_img_func, args=(q,), name="child_thread_1")
#t2 = threading.Thread(target=fetch_img_func, args=(q,), name="child_thread_2")
#t3 = threading.Thread(target=fetch_img_func, args=(q,), name="child_thread_3")
#t4 = threading.Thread(target=fetch_img_func, args=(q,), name="child_thread_4")
t1.start()
#t2.start()
#t3.start()
#t4.start()
t1.join()
#t2.join()
#t3.join()
#t4.join()

end = time.time()
print 'Done %s' % (end - start)
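
Instead of commenting threads in and out by hand, the same comparison can be driven by a single thread-count variable. A minimal sketch under the same setup (it reuses the fetch_img_func, q, and start defined above; num_threads is a knob introduced here for illustration, not part of the original script):

# Sketch: start num_threads workers in a loop instead of hand-written t1..t4
num_threads = 4  # assumed knob; vary it (1-4) to reproduce the timings below
threads = []
for n in range(num_threads):
    t = threading.Thread(target=fetch_img_func, args=(q,),
                         name="child_thread_%s" % (n + 1))
    t.start()
    threads.append(t)
for t in threads:
    t.join()
print 'Done %s' % (time.time() - start)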
Experiment results
400 images
4 threads: Done 12.443133831
3 threads: Done 12.9201757908
2 threads: Done 32.8628299236
1 thread:  Done 54.6115460396
Summary
Python has the GIL (Global Interpreter Lock), so threads never truly execute Python code in parallel. However, the GIL is released whenever a thread blocks (for example on network IO), which lets a waiting thread run; as this hand-off repeats across threads, it greatly improves the throughput of IO-bound applications.
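
A quick way to see this effect is to time the same thread count on an IO-bound wait versus a CPU-bound loop. The following is a self-contained sketch, not part of the original experiment; the exact numbers will vary by machine:

# Sketch: the GIL is released during blocking IO, but held during pure Python computation
import threading
import time

def io_task():
    time.sleep(1)               # blocking call; the GIL is released while sleeping

def cpu_task():
    n = 0
    for _ in xrange(10 ** 7):   # pure Python loop; holds the GIL the whole time
        n += 1

def timed(target, count):
    threads = [threading.Thread(target=target) for _ in range(count)]
    start = time.time()
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return time.time() - start

# IO-bound: 4 threads finish in about 1 second total, close to a single task
print 'io  x4: %.2fs' % timed(io_task, 4)
# CPU-bound: 4 threads take roughly 4x a single task, since the GIL serializes them
print 'cpu x4: %.2fs' % timed(cpu_task, 4)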