The __import__ function
We all know that import loads a module, but under the hood the import statement is implemented by the builtin function __import__. In some programs we want to call functions dynamically, and when we know the module's name as a string, __import__ makes such dynamic calls very convenient:
```python
def getfunctionbyname(module_name, function_name):
    # import the module by its name (a string), then look the function up on it
    module = __import__(module_name)
    return getattr(module, function_name)
```
With this helper we can call any function of a module given only its name.
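For example, fetching a standard-library function dynamically (hashlib is used here purely for illustration):

```python
# hypothetical usage: look up hashlib.md5 by name at runtime
md5 = getfunctionbyname('hashlib', 'md5')
print(md5(b'hello').hexdigest())  # same as hashlib.md5(b'hello').hexdigest()
```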
Plugin system workflow
For a plugin system to run, it mainly has to handle the following:
- Discover plugins by scanning a directory for .py files (see the layout sketch after this list)
- Add the plugin directory to sys.path, the module search path
- Have the crawler pass the scanned URL and page source to each plugin
- Let the plugin do its work, then hand control back to the scanner
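Based on the paths used throughout this article, the project layout is roughly as follows (the exact tree is an assumption; only files mentioned in the text are listed):

```
w8ay.py               # scanner entry point
spider.py             # crawler; hooks in the plugin system
lib/core/plugin.py    # the spiderplus plugin loader
lib/core/download.py  # copy of downloader.py used by plugins
script/               # plugin directory scanned by spiderplus
    sqlcheck.py       # SQL injection check plugin
    email_check.py    # e-mail harvesting plugin
```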
Plugin system code
Create a spiderplus class in lib/core/plugin.py that implements this:
```python
# __author__ = 'mathor'

import os
import sys

class spiderplus(object):
    def __init__(self, plugin, disallow=[]):
        self.dir_exploit = []
        self.disallow = ['__init__']
        self.disallow.extend(disallow)
        self.plugin = os.getcwd() + '/' + plugin
        sys.path.append(plugin)

    def list_plusg(self):
        # collect every .py file in the plugin directory,
        # skipping anything on the disallow list
        def filter_func(file):
            if not file.endswith('.py'):
                return False
            for disfile in self.disallow:
                if disfile in file:
                    return False
            return True
        dir_exploit = filter(filter_func, os.listdir(self.plugin))
        return list(dir_exploit)

    def work(self, url, html):
        # hand the url and page source to every plugin in turn
        for _plugin in self.list_plusg():
            try:
                m = __import__(_plugin.split('.')[0])
                spider = getattr(m, 'spider')
                p = spider()
                s = p.run(url, html)
            except Exception as e:
                print(e)
```
The work function must be passed url and html; these are exactly what our scanner hands to the plugin system. The lines
```python
spider = getattr(m, 'spider')
p = spider()
s = p.run(url, html)
```
are how we enforce the convention that every plugin defines a class named spider and is invoked through its run method.
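Concretely, a minimal plugin skeleton looks like this (the file name is arbitrary, as long as the file sits in the plugin directory):

```python
# script/demo.py: a hypothetical do-nothing plugin
class spider:
    def run(self, url, html):
        # inspect url / html here and report findings
        return True
```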
Calling the plugins from the scanner
The plugins need the URL and the page source as inputs, so we hook the plugin system into the crawler at the point where it has obtained both.
First open spider.py and add the following at the top of the file:
```python
from lib.core import plugin
```
Then add this at the end of the file:
```python
disallow = ['sqlcheck']
_plugin = plugin.spiderplus('script', disallow)
_plugin.work(_str['url'], _str['html'])
```
disallow is the list of plugins to skip; to make testing easier, we put sqlcheck on it for now.
Integrating the SQL injection check into the plugin system
It is actually very simple: just change script/sqlcheck.py to the following.
As for the download module, it is really just the downloader module: copy downloader.py and name the copy download.py. (A sketch of the interface it is expected to expose follows the code below.)
```python
import re, random
from lib.core import download

class spider:
    def run(self, url, html):
        if '?' not in url:  # pseudo-static page, nothing to inject into
            return False
        downloader = download.downloader()
        BOOLEAN_TESTS = (" AND %d=%d", " OR NOT (%d=%d)")
        DBMS_ERRORS = {
            # regular expressions used for DBMS recognition based on error message response
            "MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*", r"valid MySQL result", r"MySqlClient\."),
            "PostgreSQL": (r"PostgreSQL.*ERROR", r"Warning.*\Wpg_.*", r"valid PostgreSQL result", r"Npgsql\."),
            "Microsoft SQL Server": (r"Driver.* SQL[\-\_\ ]*Server", r"OLE DB.* SQL Server", r"(\W|\A)SQL Server.*Driver", r"Warning.*mssql_.*", r"(\W|\A)SQL Server.*[0-9a-fA-F]{8}", r"(?s)Exception.*\WSystem\.Data\.SqlClient\.", r"(?s)Exception.*\WRoadhouse\.Cms\."),
            "Microsoft Access": (r"Microsoft Access Driver", r"JET Database Engine", r"Access Database Engine"),
            "Oracle": (r"\bORA-[0-9][0-9][0-9][0-9]", r"Oracle error", r"Oracle.*Driver", r"Warning.*\Woci_.*", r"Warning.*\Wora_.*"),
            "IBM DB2": (r"CLI Driver.*DB2", r"DB2 SQL error", r"\bdb2_\w+\("),
            "SQLite": (r"SQLite/JDBCDriver", r"SQLite.Exception", r"System.Data.SQLite.SQLiteException", r"Warning.*sqlite_.*", r"Warning.*SQLite3::", r"\[SQLITE_ERROR\]"),
            "Sybase": (r"(?i)Warning.*sybase.*", r"Sybase message", r"Sybase.*Server message.*"),
        }
        # error-based check: append characters that break SQL syntax and
        # look for a database error message in the response
        _url = url + "%29%28%22%27"
        _content = downloader.get(_url)
        for (dbms, regex) in ((dbms, regex) for dbms in DBMS_ERRORS for regex in DBMS_ERRORS[dbms]):
            if re.search(regex, _content):
                return True
        # boolean-based blind check: the original page should equal the
        # always-true page and differ from the always-false page
        content = {}
        content['origin'] = downloader.get(url)
        for test_payload in BOOLEAN_TESTS:
            randint = random.randint(1, 255)
            _url = url + test_payload % (randint, randint)      # always-true condition
            content["true"] = downloader.get(_url)
            _url = url + test_payload % (randint, randint + 1)  # always-false condition
            content["false"] = downloader.get(_url)
            if content["origin"] == content["true"] != content["false"]:
                return "SQL found: %s" % url
```
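The contents of downloader.py are not shown in this article; for reference, a minimal sketch of the interface the plugin relies on (a get method returning the page body), assuming the requests library, could look like this:

```python
# lib/core/download.py: minimal sketch of the assumed interface;
# the scanner's real downloader.py may differ
import requests

class downloader:
    def get(self, url):
        # fetch a page and return its body as text, or '' on failure
        try:
            r = requests.get(url, timeout=10)
            return r.text
        except requests.RequestException:
            return ''
```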
E-mail search plugin
For a final simple example, let's search pages for e-mail addresses. Since the plugin system passes in the page source, one regular expression, ([\w-]+@[\w-]+\.[\w-]+)+, finds all the addresses. Create script/email_check.py:
```python
# __author__ = 'mathor'

import re

class spider:
    def run(self, url, html):
        # pull every e-mail address out of the page source
        pattern = re.compile(r'([\w-]+@[\w-]+\.[\w-]+)+')
        email_list = re.findall(pattern, html)
        if email_list:
            print(email_list)
            return True
        return False
```
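To see what the regex returns, here is a quick standalone check (the sample HTML is made up):

```python
import re

html = '<p>contact: admin@example.com or support@test-site.org</p>'
print(re.findall(r'([\w-]+@[\w-]+\.[\w-]+)+', html))
# ['admin@example.com', 'support@test-site.org']
```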
Run python w8ay.py, and you can see that all the e-mail addresses on the page have been collected.
Summary
That's all for this article. I hope its content offers some reference value for your study or work. If you have any questions, feel free to leave a comment.
Original article: https://www.wmathor.com/index.php/archives/1193/