Our company requires an internal daily summary of Jira bugs to be compiled and sent by email. After doing this by hand for a while, I decided to automate it, and spent three days writing this script.
Version 1: the basic script
# -*- coding:utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup as bs
import time
import os

jql = "project = SDP and parent = SDP-13330 AND issuetype in (standardIssueTypes(), subTaskIssueTypes(), BUG) AND status in (新建, 解决中, 重新打开) AND priority in (P0, P1, P2) AND reporter in (membersOf(SDP_Tester)) ORDER BY priority DESC"
username = "robin.li"
password = "a12345678"
tempfilepath = r"f:\bug_list.csv"
outbuglist = r"f:\bug_out" + time.strftime("%Y%m%d", time.localtime()) + ".csv"
parentid = "SDP-13330"
bugs_list = []

def findall_data(data, LB=" ", RB=" "):
    '''Extract the text between a left boundary LB and a right boundary RB (helper, not called below)'''
    rule = LB + r"(.+?)" + RB
    datalist = re.findall(rule, data)
    return datalist

def get_bugs_list():
    '''Extract the bug list from the saved page according to the rules'''
    if os.path.exists(outbuglist):  # remove today's output file if it already exists
        os.remove(outbuglist)
    with open(r"f:\bug_list.csv", 'r') as f:
        content = str(f.readlines())[1:-1]  # flatten the saved page into one string
        bug = []
        soup = bs(content, features='html.parser')
        for tr in soup.select('tr'):  # one table row per issue
            for td in tr.select('td'):
                bug.append(str(td.text).strip())
            bugs_list.append(bug)
            bug = []
        for bug in bugs_list:
            # strip repr noise: \n, quotes, spaces, commas, &nbsp;, the parent id and the brackets
            clear_list = str(bug[1:-2]).replace("\\n", "").replace("\'", "").replace(" ", "").replace("\\,", "").replace("\\xa0", "").replace(parentid, "").replace("[", "").replace("]", "")
            print(clear_list)
            try:
                with open(outbuglist, 'a') as f:
                    f.write(clear_list + "\n")
            except PermissionError:
                print("\033[31mPlease close the bug list file that is already open")
                return 0
        else:
            print("\n \033[31mBug list written to: " + outbuglist + "\n" + "-" * 50 + "-----bug content shown above--------\n")

def login_bugwriter(username, password, jql, tempfilepath):
    """
    Log in to Jira, pull the bug list filtered by jql and save the page to tempfilepath.
    username / password: Jira login credentials; jql: JQL filter expression.
    """
    data = {'os_username': username, 'os_password': password, 'login': '登录'}  # '登录' is the value of the Jira login button
    res = requests.get("https://jira.clouddeep.cn/secure/Dashboard.jspa")
    if res.status_code == 200:
        print("Jira is reachable, start extracting data")
        jsession = requests.Session()
        cookie_jar = jsession.post("https://jira.clouddeep.cn/login.jsp", data=data).cookies
        login_cookie = requests.utils.dict_from_cookiejar(cookie_jar)
        print("Logged in, collating the list")
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
        }
        r = requests.get("https://jira.clouddeep.cn/browse/SDP-13330", headers=headers, cookies=login_cookie)
        # print(r.text)
        jql_url = "https://jira.clouddeep.cn/issues/SDP-13330/?jql=" + jql
        print("Please confirm the filter: ==> " + jql_url + "\n" + "-" * 30)
        bug_list = requests.get(jql_url, headers=headers, cookies=login_cookie)
        with open(tempfilepath, 'w') as f:
            f.write(bug_list.text)
    else:
        print("Jira is unreachable, please check the network.")

if __name__ == '__main__':
    login_bugwriter(username, password, jql, tempfilepath)
    get_bugs_list()
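Version 1 works by posting the login form and scraping the HTML issue table, which breaks easily whenever Jira changes its page layout. As a hedged alternative sketch (not part of the original script): Jira also exposes a REST search endpoint that returns the same data as JSON. The endpoint and field names below follow the standard Jira Server REST API v2; whether it is enabled and which fields are returned depends on the server configuration.

# Hedged sketch: query the same JQL through Jira's REST API instead of scraping HTML.
# Endpoint and field names follow the standard Jira Server REST API v2; adjust to your instance.
import requests

def fetch_bugs_via_rest(base_url, username, password, jql):
    resp = requests.get(
        base_url + "/rest/api/2/search",
        params={"jql": jql, "maxResults": 200,
                "fields": "summary,priority,assignee,reporter,status,created"},
        auth=(username, password),
    )
    resp.raise_for_status()
    rows = []
    for issue in resp.json().get("issues", []):
        f = issue["fields"]
        rows.append([
            issue["key"],
            f.get("summary", ""),
            (f.get("priority") or {}).get("name", ""),
            (f.get("assignee") or {}).get("displayName", ""),
            (f.get("reporter") or {}).get("displayName", ""),
            (f.get("status") or {}).get("name", ""),
            f.get("created", ""),
        ])
    return rows

A row produced this way already contains the columns that the later versions assemble by hand, so it could replace both login_bugwriter and get_bugs_list if the REST API is reachable.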
Version 2: the basic script plus a date-difference feature
# -*- coding:utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup as bs
import time
import os
from datetime import datetime

jql = "project = SDP and parent = SDP-13330 AND issuetype in (standardIssueTypes(), subTaskIssueTypes(), BUG) AND status in (新建, 解决中, 重新打开) AND priority in (P0, P1, P2) AND reporter in (membersOf(SDP_Tester)) ORDER BY priority DESC"
username = "a.li@rc.cn"
password = "Rc123456"
tempfilepath = r"f:\bug_list.csv"
outbuglist = r"f:\bug_out" + time.strftime("%Y%m%d", time.localtime()) + ".csv"
parentid = "SDP-13330"
bugs_list = []

def trans_month(date):
    '''Convert the Chinese month name in a Jira date (e.g. 15/八月/20) into a numeric month'''
    ch_date_dict = {'一': '01', '二': '02', '三': '03', '四': '04', '五': '05', '六': '06', '七': '07', '八': '08', '九': '09', '十': '10', '十一': '11', '十二': '12'}
    old_date = date.split('/')
    ch_date = str(old_date[1])[0]  # first character of the month part
    if ch_date in ch_date_dict:
        old_date[1] = ch_date_dict[ch_date]
        new_date = "/".join(str(i) for i in old_date)
        return new_date
    else:
        print('Invalid date format')

def diff_date(d1, d2):
    '''Days between the creation date d1 ("dd/mm/yy") and the date d2'''
    d1 = datetime.strptime(d1, '%d/%m/%y').date()
    return (d2 - d1).days

def findall_data(data, LB=" ", RB=" "):
    '''Extract the text between a left boundary LB and a right boundary RB (helper, not called below)'''
    rule = LB + r"(.+?)" + RB
    datalist = re.findall(rule, data)
    return datalist

def get_bugs_list():
    '''Extract the bug list from the saved page according to the rules'''
    if os.path.exists(outbuglist):  # remove today's output file if it already exists
        os.remove(outbuglist)
    with open(r"f:\bug_list.csv", 'r') as f:
        content = str(f.readlines())[1:-1]
        bug = []
        soup = bs(content, features='html.parser')
        for tr in soup.select('tr'):
            for td in tr.select('td'):
                if td.text != "\\n":
                    print('2==', len(bug))  # debug output
                    bug.append(str(td.text).strip())
                if "<img" in str(td):  # this cell carries the priority icon; the row also carries the creation time
                    get_date = tr.time.text  # creation time text from the <time> element
                    d1 = trans_month(get_date)
                    d2 = datetime.now().date()
                    diff_days = diff_date(d1, d2)
                    s = str(td.img['alt'])  # priority name from the icon's alt text
                    print('1==', len(bug))  # debug output
                    bug.insert(len(bug), str(diff_days))
                    bug.append(s)
            bugs_list.append(bug)
            bug = []
        for bug in bugs_list:
            clear_list = str(bug[:-2]).replace("\\n", "").replace("\'", "").replace(" ", "").replace("\\,", "").replace("\\xa0", "").replace(parentid, "").replace("[", "").replace("]", "")
            print(clear_list)
            try:
                with open(outbuglist, 'a') as f:
                    f.write(str(clear_list) + "\n")
            except PermissionError:
                print("\033[31mPlease close the bug list file that is already open")
                return 0
        else:
            print("\n \033[31mBug list written to: " + outbuglist + "\n" + "-" * 50 + "-----bug content shown above--------\n")

def login_bugwriter(username, password, jql, tempfilepath):
    """
    Log in to Jira, pull the bug list filtered by jql and save the page to tempfilepath.
    username / password: Jira login credentials; jql: JQL filter expression.
    """
    data = {'os_username': username, 'os_password': password, 'login': '登录'}
    res = requests.get("https://jira.clouddeep.cn/secure/Dashboard.jspa")
    if res.status_code == 200:
        print("Jira is reachable, start extracting data")
        jsession = requests.Session()
        cookie_jar = jsession.post("https://jira.clouddeep.cn/login.jsp", data=data).cookies
        login_cookie = requests.utils.dict_from_cookiejar(cookie_jar)
        print("Logged in, collating the list")
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
        }
        r = requests.get("https://jira.clouddeep.cn/browse/SDP-13330", headers=headers, cookies=login_cookie)
        jql_url = "https://jira.clouddeep.cn/issues/SDP-13330/?jql=" + jql
        print("Please confirm the filter: ==> " + jql_url + "\n" + "-" * 30)
        bug_list = requests.get(jql_url, headers=headers, cookies=login_cookie)
        with open(tempfilepath, 'w') as f:
            f.write(bug_list.text)
    else:
        print("Jira is unreachable, please check the network.")

if __name__ == '__main__':
    # login_bugwriter(username, password, jql, tempfilepath)
    get_bugs_list()
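The new piece in version 2 is the date handling: the Jira page renders creation dates with Chinese month names, so trans_month first maps the month back to a number and diff_date then computes how many days the bug has stayed open. A minimal usage sketch of the two helpers above (the sample date string is an assumed example of the day/Chinese-month/two-digit-year format):

from datetime import datetime

sample = "15/八月/20"                       # assumed example of the Jira date format
numeric = trans_month(sample)               # -> "15/08/20"
days_open = diff_date(numeric, datetime.now().date())
print(numeric, days_open)

One caveat worth noting: trans_month only looks at the first character of the month, so 十月, 十一月 and 十二月 all map to 10; reports generated in November or December would need a stricter lookup.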
Version 3: the extractor was optimized to support one-click bug extraction
The implementation code:
# -*- coding:utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup as bs
import time
import os
from datetime import datetime

parentid = "SDP-15642"  # Jira key of the bug-summary (parent) issue
# Filter condition; the status names stay in Chinese because they must match the Jira configuration
jql = "project = SDP and parent = " + parentid + " AND issuetype in (standardIssueTypes(), subTaskIssueTypes(), BUG) AND status in (新建, 解决中, 重新打开) AND priority in (P0, P1, P2) AND reporter in (membersOf(SDP_Tester)) ORDER BY priority DESC"
# CSV header: Jira link, description, bug level, assignee, reporter, Jira status, created time, days open
tr_title = "JIRA链接,详细描述,BUG等级,经办人,报告人,JIRA状态,JIRA创建时间,bug持续时间"
username = "aa.li@ss.cn"
password = "ss2018()"
tempfilepath = r"./bug_list.csv"
outbuglist = r"./bug_out" + time.strftime("%Y%m%d", time.localtime()) + ".csv"
bugs_list = []

def trans_month(date):
    '''Convert the Chinese month name in a Jira date (e.g. 15/八月/20) into a numeric month'''
    ch_date_dict = {'一': '01', '二': '02', '三': '03', '四': '04', '五': '05', '六': '06', '七': '07', '八': '08', '九': '09', '十': '10', '十一': '11', '十二': '12'}
    old_date = date.split('/')
    ch_date = str(old_date[1])[0]  # first character of the month part
    if ch_date in ch_date_dict:
        old_date[1] = ch_date_dict[ch_date]
        new_date = "/".join(str(i) for i in old_date)
        return new_date
    else:
        print('Invalid date format')

def diff_date(d1, d2):
    '''Days between the creation date d1 ("dd/mm/yy") and the date d2'''
    d1 = datetime.strptime(d1, '%d/%m/%y').date()
    return (d2 - d1).days

def findall_data(data, LB=" ", RB=" "):
    '''Extract the text between a left boundary LB and a right boundary RB (helper, not called below)'''
    rule = LB + r"(.+?)" + RB
    datalist = re.findall(rule, data)
    return datalist

def get_bugs_list():
    '''Extract the bug list from the saved page according to the rules'''
    with open(tempfilepath, 'r') as f:  # read the page saved by login_bugwriter
        content = str(f.readlines())[1:-1]
        bug = []
        soup = bs(content, features='html.parser')
        for tr in soup.select('tr'):
            for td in tr.select('td'):
                str1 = re.sub(r"[\\n'\s]", "", str(td.text))   # drop \n, quotes and whitespace
                str2 = re.sub(r"[\s ,\r\n]{1,99}", ",", str1)  # collapse separators into single commas
                if str2 != "":
                    bug.append(str(str2).strip())
                if "<img" in str(td):  # this cell carries the priority icon; the row also carries the creation time
                    get_date = tr.time.text  # creation time text from the <time> element
                    d1 = trans_month(get_date)
                    d2 = datetime.now().date()
                    diff_days = diff_date(d1, d2)
                    s = str(td.img['alt'])  # priority name from the icon's alt text
                    bug.insert(len(bug), str(diff_days))
                    bug.append(s)
            bugs_list.append(bug)
            bug = []
        else:
            print("\n \033[31m" + "-" * 50 + "-----bug rows extracted as above--------\n")
    return bugs_list

def login_bugwriter(username, password, jql, tempfilepath):
    """
    Log in to Jira, pull the bug list filtered by jql and save the page to tempfilepath.
    username / password: Jira login credentials; jql: JQL filter expression.
    """
    data = {'os_username': username, 'os_password': password, 'login': '登录'}  # '登录' is the value of the Jira login button
    res = requests.get("https://jira.clouddeep.cn/secure/Dashboard.jspa")
    if res.status_code == 200:
        print("Jira is reachable, start extracting data")
        jsession = requests.Session()
        cookie_jar = jsession.post("https://jira.clouddeep.cn/login.jsp", data=data).cookies
        login_cookie = requests.utils.dict_from_cookiejar(cookie_jar)
        print("Logged in, collating the list")
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
        }
        r = requests.get("https://jira.clouddeep.cn/browse/" + parentid, headers=headers, cookies=login_cookie)
        jql_url = "https://jira.clouddeep.cn/issues/" + parentid + "/?jql=" + jql
        print("Please confirm the filter: ==> " + jql_url + "\n" + "-" * 30)
        bug_list = requests.get(jql_url, headers=headers, cookies=login_cookie)
        with open(tempfilepath, 'w') as f:
            f.write(bug_list.text)
    else:
        print("Jira is unreachable, please check the network.")

def order_buglist(buglist):
    '''Reorder each extracted row into the tr_title column order and write the final CSV'''
    with open(outbuglist, 'w') as f:
        f.write(tr_title + '\n')
        print(tr_title)
        for l in buglist:
            if len(l) <= 1:
                print("Row has one field or less, skipped")
            else:
                # reorder the raw row fields into the tr_title column order (indices depend on the Jira table layout)
                s = l[3:4] + str(l[4:5]).split(",")[2:3] + l[2:3] + l[9:11] + l[5:6] + l[8:9] + l[1:2]
                s2 = re.sub(r"'", "", str(s)[3:-1])
                s3 = re.sub(r"[, ]{1,8}", ",", s2)
                f.write("https://jira.clouddeep.cn/browse/" + str(s3) + '\n')
                print('out>>', s3)
    print("Bug list written to:\n", outbuglist)
    if os.path.exists(tempfilepath):
        os.remove(tempfilepath)  # clean up the temporary page file
    else:
        print("no such file: %s" % tempfilepath)

if __name__ == '__main__':
    login_bugwriter(username, password, jql, tempfilepath)
    bug = get_bugs_list()
    order_buglist(bug)
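The goal stated at the top is a daily bug email, which the script itself does not send yet; after order_buglist has produced the CSV, it could be mailed out with the standard library. The sketch below is only an illustration: the sender, recipients and SMTP server are placeholders, not values from the original setup.

# Hedged sketch: email the generated CSV. All SMTP settings below are placeholders.
import smtplib
from email.message import EmailMessage

def mail_bug_list(csv_path):
    msg = EmailMessage()
    msg["Subject"] = "Daily Jira bug list"
    msg["From"] = "reporter@example.com"    # placeholder sender
    msg["To"] = "team@example.com"          # placeholder recipients
    msg.set_content("Today's Jira bug list is attached.")
    with open(csv_path, "rb") as f:
        msg.add_attachment(f.read(), maintype="text", subtype="csv",
                           filename=csv_path.split("/")[-1])
    with smtplib.SMTP("smtp.example.com", 25) as smtp:  # placeholder SMTP server
        smtp.send_message(msg)

# e.g. call mail_bug_list(outbuglist) after order_buglist(bug) in the __main__ block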
One-click extraction:
Write two .bat files:
init_env.bat  # set up the environment and install the required libraries
pip install requests
pip install bs4
get_jira_bugs.bat  # calls the main script and writes the bug list to a CSV file
@ECHO OFF
python ./jiraCollection.py
echo run success!
pause
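To cover the "daily" part without anyone running the batch file by hand, get_jira_bugs.bat could be registered with the Windows Task Scheduler. This is only a hedged example: the task name, run time and path are arbitrary choices, and the trailing pause in the batch file should be removed for unattended runs.

schtasks /Create /SC DAILY /ST 09:30 /TN "JiraDailyBugList" /TR "C:\path\to\get_jira_bugs.bat"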
That concludes this article on extracting a Jira bug list with Python. For more on the topic, search the earlier articles on 服务器之家 or browse its related articles, and please keep supporting 服务器之家!
Original article: https://blog.csdn.net/qdPython/article/details/109532124