# Demo Describe: data parsing — the re (regular expression) part
import re
import requests
import csv
from fake_useragent import UserAgent
# start--------1,re-正则表达式----------------------
'''
正则表达式
https://www.runoob.com/regexp/regexp-metachar.html
贪婪匹配:.*
惰性匹配:.*?
案例:
str:<div>米老鼠</div><span>米奇</span>
1.re:<.*>
输出:<div>米老鼠</div><span>米奇</span>
2.re:<.*?>
输出:<div>
</div>
<span>
</span>
3.re:<div>.*?</div>
输出:<div>米老鼠</div>
'''
# end--------1,re-正则表达式----------------------
# start--------2,re模块----------------------
str = '电话1:10086;电话2:10010'
'''
1.findall(re,str)
找出符合正则的结果
2.finditer(re,str)
找出符合正则的结果的迭代器
3.search(re,str)
全文匹配到一个结果就返回该结果的Match
4.match(re,str)
从头匹配到一个结果就返回该结果的Match
5.compile
正则预加载
6.(?P<key>re) 常用-从正则结果中提取需要的内容 (重要)
比如 (?P<Name>.*?)
'''
# 1
lst = re.findall(r'\d+', str)
# print(lst) # ['1', '10086', '2', '10010']
# 2
'''
1
10086
2
10010
'''
lst2 = re.finditer(r'\d+', str)
for i in lst2:
pass
# print(i.group())
# 3
lst3 = re.search(r'\d+', str)
# print(lst3.group()) # 1
# 4
lst4 = re.match(r'\d+', '2008年北京!')
# print(lst4.group()) # 2008
# 5
reRule = re.compile(r'\d+')
lst5 = reRule.match('2008年北京!')
# print(lst5.group()) # 2008
# 6
strHtml = '''
<div class="c-gap-top-small sitelink_summary "><a href="http://book.douban.com/" target="_blank">豆瓣读书</a><p class="c-color-text">记录你读过的、想读和正在读的书,顺便打分,添加标签及个人附注,写评论</p></div>
<div class="c-gap-top-small sitelink_summary "><a href="http://music.douban.com/" target="_blank">豆瓣音乐</a><p class="c-color-text">豆瓣FM客户端 让好音乐继续 豆瓣音乐人客户端 发现当下最酷的独立音</p></div>
<div class="c-gap-top-small sitelink_summary "><a href="http://movie.douban.com/" target="_blank">豆瓣电影</a><p class="c-color-text">豆瓣电影提供最新的电影介绍及评论包括上映影片的影讯查询及购票服务。你</p></div>
<div class="c-gap-top-small sitelink_summary "><a href="http://beijing.douban.com/" target="_blank">豆瓣同城</a><p class="c-color-text">豆瓣同城-北京 北京 同城活动 主办方 舞台剧 北京 上海 广州 武</p></div>
'''
obj = re.compile(r'<div class="c-gap-top-small sitelink_summary ">'
r'<a href="(?P<InterIP>.*?)" target="_blank">(?P<InterName>.*?)</a>'
r'<p class="c-color-text">(?P<InterInfo>.*?)</p></div>', re.S)
ret = obj.finditer(strHtml)
for i in ret:
pass
# print(i.group('InterIP'))
# print(i.group('InterName'))
# print(i.group('InterInfo'))
''' 输出结果
http://book.douban.com/
豆瓣读书
记录你读过的、想读和正在读的书,顺便打分,添加标签及个人附注,写评论
http://music.douban.com/
豆瓣音乐
豆瓣FM客户端 让好音乐继续 豆瓣音乐人客户端 发现当下最酷的独立音
http://movie.douban.com/
豆瓣电影
豆瓣电影提供最新的电影介绍及评论包括上映影片的影讯查询及购票服务。你
http://beijing.douban.com/
豆瓣同城
豆瓣同城-北京 北京 同城活动 主办方 舞台剧 北京 上海 广州 武
'''
# end--------2,re模块----------------------
# start--------3,练习,爬取豆瓣电影排行----------------------
'''
1,拿到页面源代码
2,通过re提取信息
排行前250电影名称,年份,评价人数,评分,简介
'''
# url1 = f'https://movie.douban.com/top250'
# ua = UserAgent()
# user_agent = ua.random
# print(user_agent)
# headers = {
# 'user-agent': user_agent
# }
# resp = requests.get(url1, headers=headers)
# pageContent = resp.text
# resp.close()
# reObj = re.compile(r'<li>.*?<div class="item">.*?<span class="title">(?P<movie>.*?)</span>.*?'
# r'<p class="">.*?<br>(?P<time>.*?) .*?<span class="rating_num" '
# r'property="v:average">(?P<average>.*?)</span>.*?'
# r'<span>(?P<people>.*?)人评价</span>.*?<span class="inq">(?P<introduction>.*?)</span>',re.S)
# ret = reObj.finditer(pageContent)
# with open('../FileForDemo/P3Demo_dataAnalysis_re_douban.csv',mode='w',encoding='utf-8') as file:
# csvWriter = csv.writer(file)
# for i in ret:
# dic = i.groupdict()
# dic['time'] = dic['time'].strip()
# csvWriter.writerow(dic.values())
# print(i.group('movie'))
# print('Down!')
# end--------3,练习,爬取豆瓣电影排行----------------------
# start--------4,练习,爬取电影天堂----------------------
'''
1,定位到电影类别标题
2,定位到标题栏下方URl电影链接页面进入子页面
3,拿到下载链接
'''
domain = 'https://www.dygod.net/'
ua = UserAgent()
user_agent = ua.random
headers = {
'user-agent': user_agent
}
resp = requests.get(domain, headers=headers)
resp.encoding = 'gb2312'
pageConetnt = resp.text
obj1 = re.compile(r'欧美电视剧.*?<div class="co_content222">.*?<ul>(?P<movieUlList>.*?)</ul>', re.S)
obj2 = re.compile(r"<a href='(?P<childHref>.*?)'", re.S)
obj3 = re.compile(r'◎译 名(?P<movieCNName>.*?)<br />.*?◎片 名(?P<movieENName>.*?)<br />'
r'.*?<div style="display:none">(?P<movieDownList>.*?)</div>', re.S)
obj4 = re.compile(r'<a href="(?P<movieDowloadIP>.*?)"', re.S)
movieUlList = obj1.finditer(pageConetnt)
child_herfList = []
for i in movieUlList:
ret1 = i.group('movieUlList')
ret2 = obj2.finditer(ret1)
for i in ret2:
childHref = domain + i.group('childHref').strip('/')
child_herfList.append(childHref)
with open('../FileForDemo/P3Demo_dataAnalysis_re_dygod.csv', mode='w', encoding='UTF-8') as file:
csvWriter = csv.writer(file)
for i in child_herfList:
childResp = requests.get(i, headers=headers)
childResp.encoding = 'gb2312'
childPageConetnt = childResp.text
ret3 = obj3.finditer(childPageConetnt)
for i in ret3:
dic_movieCNName = i.group('movieCNName').strip()
dic_movieENName = i.group('movieENName').strip()
fieldnames = {'movieCNName': dic_movieCNName, 'movieENName': dic_movieENName}
csvWriter.writerow(fieldnames.values())
movieDownList = obj4.finditer(i.group('movieDownList'))
for i in movieDownList:
dic = i.groupdict()
csvWriter.writerow(dic.values())
print(dic_movieCNName)
childResp.close()
resp.close()
print('Down!!!')
# end--------4,练习,爬取电影天堂----------------------