# -*- coding: utf-8 -*-
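"""Scrape the latest announcement headlines ("公告快讯") from cnstock.com and
write them, together with the fetch time, to a local text file."""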
from time import ctime
from urllib.request import urlopen

from bs4 import BeautifulSoup
def get_last_info():
    # "Announcement Express" list page on the China Securities Network (cnstock.com).
    url = 'http://ggjd.cnstock.com/gglist/search/ggkx/0'
    html = urlopen(url)
    bsobj2 = BeautifulSoup(html, "lxml")
    # The headlines we need sit in the second <ul class="new-list"> on the page.
    alltext = bsobj2.body.find_all("ul", class_="new-list")
    t1 = alltext[1].get_text()
    # Prefix the text with the fetch time, the source URL and the site breadcrumb
    # (China Securities Network -> Listed Companies -> Disclosure & Announcement
    #  Analysis -> Announcement Express).
    t1 = "<获取信息时间>:%s\n" % ctime() + url + '\n' + "中国证券网->上市公司专区->信息披露与公告解读->公告快讯:" + t1
    print(t1)
    with open(r'C:\Users\Administrator\Desktop\securities\securities_info1.txt',
              'w', encoding='utf-8') as file_open:
        file_open.write(t1)


if __name__ == "__main__":
    get_last_info()
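
# Usage sketch (assumes Python 3 with the beautifulsoup4 and lxml packages
# installed, and that the hard-coded Windows output directory already exists;
# the script filename below is whatever name this file is saved under):
#     pip install beautifulsoup4 lxml
#     python this_script.py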