Java爬虫学习(3)之用对象保存新浪微博博文

时间:2023-03-10 03:49:43
Java爬虫学习(3)之用对象保存新浪微博博文
 package com.mieba;

 import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor; public class SinaPageProcessor implements PageProcessor
{
public static final String URL_LIST = "http://blog\\.sina\\.com\\.cn/s/articlelist_1487828712_0_\\d+\\.html"; public static final String URL_POST = "http://blog\\.sina\\.com\\.cn/s/blog_\\w+\\.html"; private Site site = Site.me().setTimeOut(10000).setRetryTimes(3).setSleepTime(1000).setCharset("UTF-8").setUserAgent( "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31");; @Override
public Site getSite()
{
// TODO Auto-generated method stub
return site;
} @Override
public void process(Page page)
{
// TODO Auto-generated method stub
// 列表页 if (page.getUrl().regex(URL_LIST).match())
{
// 从页面发现后续的url地址来抓取
page.addTargetRequests(page.getHtml().xpath("//div[@class=\"articleList\"]").links().regex(URL_POST).all()); page.addTargetRequests(page.getHtml().links().regex(URL_LIST).all()); // 文章页 } else
{
String title = new String();
String content = new String();
Article ar = new Article(title, content);
// 定义如何抽取页面信息,并保存下来
ar.setTitle(page.getHtml().xpath("//div[@class='articalTitle']/h2/text()").toString()); ar.setContent(
page.getHtml().xpath("//div[@id='articlebody']//div[@class='articalContent']/text()").toString());
System.out.println("title:"+ar.getTitle());
System.out.println(ar.getContent());
page.putField("repo", ar);
// page.putField("date", page.getHtml().xpath("//div[@id='articlebody']//span[@class='time SG_txtc']/text()").regex("\\((.*)\\)")); }
} }
 package com.mieba;

 import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Vector; import us.codecraft.webmagic.ResultItems;
import us.codecraft.webmagic.Task;
import us.codecraft.webmagic.pipeline.Pipeline;

/**
 * Pipeline that appends every crawled {@link Article} to a local file named
 * sina.txt (one {@code Article#toString()} line per article).
 */
public class SinaPipeline implements Pipeline
{
    @Override
    public void process(ResultItems resultItems, Task task)
    {
        // Stored by SinaPageProcessor under the "repo" key. List pages never
        // store it, so skip them instead of appending literal "null" lines.
        Article vo = resultItems.get("repo");
        if (vo == null)
        {
            return;
        }
        // try-with-resources closes the writer on every path; the previous
        // finally block called pw.close() unconditionally and threw an NPE
        // whenever the FileWriter constructor failed before pw was assigned.
        // IOException also covers the FileNotFoundException caught before.
        try (PrintWriter pw = new PrintWriter(new FileWriter("sina.txt", true)))
        {
            pw.println(vo);
            pw.flush();
        }
        catch (IOException e)
        {
            e.printStackTrace();
        }
    }
}
 package com.mieba;

 public class Article
{
private String title;
private String content;
public String getTitle()
{
return title;
}
public void setTitle(String title)
{
this.title = title;
}
public String getContent()
{
return content;
}
public void setContent(String content)
{
this.content = content;
}
public Article(String title, String content)
{
super();
this.title = title;
this.content = content;
}
@Override
public String toString()
{
return "Article [title=" + title + ", content=" + content + "]";
} }
 package com.mieba;

 import us.codecraft.webmagic.Spider;

 public class Demo
{ public static void main(String[] args)
{ // 爬取开始
Spider
// 爬取过程
.create(new SinaPageProcessor())
// 爬取结果保存
.addPipeline(new SinaPipeline())
// 爬取的第一个页面
.addUrl("http://blog.sina.com.cn/s/articlelist_1487828712_0_1.html")
// 启用的线程数
.thread(5).run();
}
}

运行结果

Java爬虫学习(3)之用对象保存新浪微博博文

爬取到的数据

Java爬虫学习(3)之用对象保存新浪微博博文

总结:

对于结构简单的静态页面,基本可以实现爬取,并用对象存储数据,最终保存为 txt 文档。

目前存在的问题在于:对于一些由前端渲染的页面,还找不到对应的 url 链接来完成爬取;后续还需要进一步学习模拟登录页面,以获得隐藏的 url 等数据。