Downloading the files one at a time would have taken more than two hours, so I just spun up a thread pool; the improvement was obvious. The single-file version looked like this:
import urllib2
from urlparse import urlparse

uri = 'http://******/patch****'
d = urllib2.urlopen(uri)
res = urlparse(uri)
# save the file under c:/ using the URL path as the file name
f = open('c:/' + res.path, 'wb')
f.write(d.read())
f.close()
print 'over'
And here is the thread-pool version:

import urllib2
from urlparse import urlparse
import Queue
import threading

# read the list of URLs to download, one per line
mf = open('rest.txt', 'r')
urls = []
for line in mf:
    line = line.strip()
    if line:
        urls.append(line)
mf.close()

queue = Queue.Queue()

class ThreadUrl(threading.Thread):
    """Worker thread: pulls URLs off the queue and saves them under self.root."""
    def __init__(self, queue, root):
        threading.Thread.__init__(self)
        self.queue = queue
        self.root = root

    def run(self):
        while True:
            uri = self.queue.get()
            print uri
            d = urllib2.urlopen(uri)
            res = urlparse(uri)
            # use the URL path as the local file name
            f = open(self.root + res.path, 'wb')
            f.write(d.read())
            f.close()
            self.queue.task_done()
            print 'task done'

rootdir = 'C:/pathes/'

# start four daemon worker threads
for i in range(4):
    t = ThreadUrl(queue, rootdir)
    t.setDaemon(True)
    t.start()

# enqueue every URL, then block until the workers have finished them all
for uri in urls:
    queue.put(uri)
queue.join()
print 'over'
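
For reference, on Python 3 the same thread-pool idea is usually written with concurrent.futures, which does the Queue and worker-thread bookkeeping for you. This is only a minimal sketch under the same assumptions as the code above (URL list in rest.txt, downloads saved under C:/pathes/, four workers); it also keeps just the last path segment of each URL as the file name, so adjust that if your URLs carry subdirectories.

# Minimal Python 3 sketch of the same download pool (paths and worker count are assumptions).
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse
from urllib.request import urlopen
import os

ROOT = 'C:/pathes/'    # assumed download directory
URL_FILE = 'rest.txt'  # assumed list of URLs, one per line

def fetch(uri):
    # save the response body under ROOT, named after the last segment of the URL path
    name = os.path.basename(urlparse(uri).path)
    with urlopen(uri) as resp, open(os.path.join(ROOT, name), 'wb') as f:
        f.write(resp.read())
    return uri

with open(URL_FILE) as mf:
    urls = [line.strip() for line in mf if line.strip()]

# run fetch() over all URLs on four worker threads; map() yields results in input order
with ThreadPoolExecutor(max_workers=4) as pool:
    for done in pool.map(fetch, urls):
        print('task done:', done)
print('over')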