Xici Proxy (西刺代理) was a well-known free IP proxy site in China. Since the site has shut down, I am releasing my original scraping code here for everyone to study.
Mirror address: https://www.blib.cn/url/xcdl.html
First, find all the tr tags, including the rows marked class="odd", and extract them.
Next, walk through the td tags inside each tr and keep only the ones at positions [1, 2, 5, 9] (the IP, port, protocol type, and verification-time columns); everything else is skipped.
Finally, we can write the code that scrapes a single page and saves the extracted records to a file.
import re
import requests
from bs4 import BeautifulSoup

head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

if __name__ == "__main__":
    ip_list = []
    fp = open("spideraddr.json", "a+", encoding="utf-8")
    url = "https://www.blib.cn/url/xcdl.html"
    request = requests.get(url=url, headers=head)
    soup = BeautifulSoup(request.content, "lxml")
    # The regex "|[^odd]" matches any class value, so this effectively
    # selects every <tr> row that carries a class attribute
    data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
    for item in data:
        soup_proxy = BeautifulSoup(str(item), "lxml")
        proxy_list = soup_proxy.find_all(name="td")
        if len(proxy_list) < 10:
            continue  # skip the header row and malformed rows
        for i in [1, 2, 5, 9]:  # IP, port, protocol type, verify time
            ip_list.append(proxy_list[i].string)
        print("[+] Crawl list: {} saved".format(ip_list))
        fp.write(str(ip_list) + '\n')
        ip_list.clear()
After crawling, the results are saved to spideraddr.json.
Finally, a second piece of code converts each saved record into a format that an SSR proxy tool can consume directly, e.g. {'http': 'http://119.101.112.31:9999'}.
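Each line of spideraddr.json holds one record as a Python list literal with the four extracted columns. An illustrative line (these values are made up, not real scrape output):

['119.101.112.31', '9999', 'HTTP', '20-10-21 17:20']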
import ast

if __name__ == "__main__":
    result = []
    fp = open("spideraddr.json", "r")
    data = fp.readlines()
    for item in data:
        dic = {}
        # Each line is a list literal: [ip, port, protocol, verify_time];
        # ast.literal_eval is a safer replacement for a bare eval()
        read_line = ast.literal_eval(item.replace("\n", ""))
        protocol = read_line[2].lower()
        if protocol == "http":
            dic[protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    print(result)
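As a quick sanity check, one of the converted dictionaries can be passed straight to requests through its proxies parameter. A minimal sketch, assuming the proxy below is still alive (the address is illustrative, taken from the format above):

import requests

proxy = {"http": "http://119.101.112.31:9999"}
# httpbin.org/ip echoes the caller's IP, so a working proxy reports its own address
response = requests.get("http://httpbin.org/ip", proxies=proxy, timeout=5)
print(response.text)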
The complete multi-threaded version is shown below.
import ast
import re
import threading
import argparse
import requests
from queue import Queue
from bs4 import BeautifulSoup

head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

class AgentSpider(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        ip_list = []
        fp = open("spideraddr.json", "a+", encoding="utf-8")
        while not self._queue.empty():
            url = self._queue.get()
            try:
                request = requests.get(url=url, headers=head)
                soup = BeautifulSoup(request.content, "lxml")
                data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
                for item in data:
                    soup_proxy = BeautifulSoup(str(item), "lxml")
                    proxy_list = soup_proxy.find_all(name="td")
                    if len(proxy_list) < 10:
                        continue  # skip the header row and malformed rows
                    for i in [1, 2, 5, 9]:  # IP, port, protocol type, verify time
                        ip_list.append(proxy_list[i].string)
                    print("[+] Crawl list: {} saved".format(ip_list))
                    fp.write(str(ip_list) + '\n')
                    ip_list.clear()
            except Exception:
                pass

def StartThread(count):
    queue = Queue()
    threads = []
    for item in range(1, int(count) + 1):
        url = "https://www.xicidaili.com/nn/{}".format(item)
        queue.put(url)
        print("[+] Generated crawl URL {}".format(url))
    for item in range(count):
        threads.append(AgentSpider(queue))
    for t in threads:
        t.start()
    for t in threads:
        t.join()

# Conversion function
def ConversionAgentIP(filename):
    result = []
    fp = open(filename, "r")
    data = fp.readlines()
    for item in data:
        dic = {}
        read_line = ast.literal_eval(item.replace("\n", ""))
        protocol = read_line[2].lower()
        if protocol == "http":
            dic[protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    return result

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--page", dest="page", help="number of pages to crawl")
    parser.add_argument("-f", "--file", dest="file", help="convert crawl results (e.g. spideraddr.json) into proxy format")
    args = parser.parse_args()
    if args.page:
        StartThread(int(args.page))
    elif args.file:
        dic = ConversionAgentIP(args.file)
        for item in dic:
            print(item)
    else:
        parser.print_help()
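Assuming the full script is saved as xici_spider.py (the file name is my choice, not from the original post), a typical session first crawls a few pages and then converts the results:

python xici_spider.py -p 5                # crawl pages 1-5 with 5 worker threads
python xici_spider.py -f spideraddr.json  # print the converted proxy dictionaries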
That concludes this walkthrough of the sample code for multi-threaded crawling of Xici Proxy in Python. For more material on multi-threaded proxy crawling with Python, see the other related articles on 服务器之家!
Original article: https://www.cnblogs.com/LyShark/p/13850457.html