# coding=utf-8
import json
import re

import requests
from retrying import retry


class TyY:
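    """Scrape lawyer names and e-mail addresses from the profile pages on www.tylaw.cn."""
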
    def __init__(self):
        # Profile pages are numbered consecutively; build the full URL list up front.
        self.url = ["http://www.tylaw.cn/cgi-bin/GLaw.dll?DispInfo&nid={}".format(i) for i in range(3723, 3752)]
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36"
        }

    @retry(stop_max_attempt_number=3)  # retry a failed request up to 3 times
    def _parse_url(self, url):
        response = requests.get(url, headers=self.headers, timeout=3)
        response.raise_for_status()  # raise on a bad status so @retry can re-issue the request
        response.encoding = "gb2312"  # the site serves GB2312-encoded pages
        return response.text

    def parse_url(self, url):
        try:
            html = self._parse_url(url)
        except Exception:  # network error or bad status after all retries
            html = None
        return html

    def get_content_list(self, html):
        content_list = []
        item = {}
        # Run each pattern once and fall back to None when nothing matches.
        names = re.findall(r"<p align='center' class='titlc'>.*?律师:(.*?)简介.*?</p>", html, re.S)
        emails = re.findall(r"<div class='contc'>.*?【电子信箱】(.*?) </div>", html, re.S)
        item['name'] = names[0] if names else None
        item['email'] = emails[0] if emails else None
        content_list.append(item)
        return content_list

    def save_content(self, content_list):
        # Append each record as JSON; UTF-8 keeps the Chinese text readable with ensure_ascii=False.
        with open("tyy.json", "a", encoding="utf-8") as f:
            for content in content_list:
                json.dump(content, f, ensure_ascii=False)
                f.write(',\n')

    def run(self):
        url_list = self.url
        for url in url_list:
            html = self.parse_url(url)
            if html is None:  # skip pages that could not be downloaded
                continue
            content_list = self.get_content_list(html)
            self.save_content(content_list)


if __name__ == '__main__':
    tyy = TyY()
    tyy.run()