码迷,mamicode.com
首页 > 其他好文 > 详细

爬虫代码实现七:实现高、低级队列循环抓取

时间:2017-01-19 12:01:15      阅读:247      评论:0      收藏:0      [点我收藏+]

标签:ack   tar   commons   entity   count   并且   tsp   pos   lis   

1.定义仓库接口

package com.dajiangtai.djt_spider.service;

/**
*
* 存储url仓库接口
*
*/
/**
 * Repository of URLs awaiting crawling, split into two priority levels.
 *
 * <p>Implementations drain the high-priority queue before the
 * low-priority one, so list pages can be crawled ahead of detail pages.
 */
public interface IRepositoryService {

    /**
     * Fetches the next URL to crawl, preferring the high-priority queue.
     *
     * @return the next URL, or {@code null} if both queues are empty
     */
    String poll();

    /**
     * Enqueues a URL into the high-priority queue (e.g. list pages).
     *
     * @param url the URL to add
     */
    void addHighLevel(String url);

    /**
     * Enqueues a URL into the low-priority queue (e.g. detail pages).
     *
     * @param url the URL to add
     */
    void addLowLevel(String url);
}

2.仓库接口实现类

package com.dajiangtai.djt_spider.service.impl;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

import org.apache.commons.lang.StringUtils;

import com.dajiangtai.djt_spider.service.IRepositoryService;

/**
*
* url仓库实现类
*
*/
public class QueueRepositoryService implements IRepositoryService {

//高优先级
private Queue<String> highLevelQueue = new ConcurrentLinkedDeque<String>();
//低优先级
private Queue<String> lowLevelQueue = new ConcurrentLinkedDeque<String>();

public String poll() {
//先解析高优先级队列
String url = highLevelQueue.poll();
if(StringUtils.isBlank(url)){
//然后再解析低优先级队列
url = lowLevelQueue.poll();
}
return url;
}

public void addHighLevel(String url) {
this.highLevelQueue.add(url);

}

public void addLowLevel(String url) {
this.lowLevelQueue.add(url);

}

}

3.重构爬虫入口类

package com.dajiangtai.djt_spider.start;

import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.StringUtils;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IDownLoadService;
import com.dajiangtai.djt_spider.service.IProcessService;
import com.dajiangtai.djt_spider.service.IRepositoryService;
import com.dajiangtai.djt_spider.service.IStoreService;
import com.dajiangtai.djt_spider.service.impl.ConsoleStoreService;
import com.dajiangtai.djt_spider.service.impl.HttpClientDownLoadService;
import com.dajiangtai.djt_spider.service.impl.QueueRepositoryService;
import com.dajiangtai.djt_spider.service.impl.YOUKUProcessService1;

/**
* 电视剧爬虫入口类
* @author Administrator
*
*/
public class StartDSJCount {

//页面下载接口
private IDownLoadService downLoadService;
//页面解析接口
private IProcessService processService;
//数据存储接口
private IStoreService storeService;

private IRepositoryService repositoryService;

//并发执行队列,这里只有一个队列,没有优先级,因此需要对它优化,注释这一行,换成新的接口实现方法
//private Queue<String> urlQueue = new ConcurrentLinkedDeque<String>();

public static void main(String[] args) {
StartDSJCount dsj = new StartDSJCount();
dsj.setDownLoadService(new HttpClientDownLoadService());
dsj.setProcessService(new YOUKUProcessService1());
dsj.setStoreService(new ConsoleStoreService());
dsj.setRepositoryService(new QueueRepositoryService());

//详情页面url
// String url = "http://list.youku.com/show/id_z9cd2277647d311e5b692.html?spm=a2h0j.8191423.sMain.5~5~A!2.iCUyO9";

//列表页面url
String url = "http://tv.youku.com/search/index/_page40177_comdid_40177";

// //下载页面
// Page page = dsj.downloadPage(url);
// dsj.processPage(page);
// //存储页面信息
// dsj.storePageInfo(page);

//设置起始的url
// dsj.urlQueue.add(url);

//将起始的url放入高优先级队列中,起始url为列表url
dsj.repositoryService.addHighLevel(url);
//开启爬虫
dsj.startSpider();



}

//开启一个爬虫入口
public void startSpider(){
//循环抓取
while(true){
//从队列中提取需要解析的url
//String url = urlQueue.poll();

//保证了先从高优先级中取,再从低优先级队列中取
String url = repositoryService.poll();

//判断url是否为空
if(StringUtils.isNotBlank(url)){
//下载
Page page = this.downloadPage(url);
//解析
this.processPage(page);
//解析后将urlList中的url分别取出来并且放入队列中
List<String> urlList = page.getUrlList();
for(String eachurl:urlList){
//this.urlQueue.add(eachurl);
//如果是列表url,加入到高优先级队列中
if(eachurl.startsWith("http://tv.youku.com/search/index")){
this.repositoryService.addHighLevel(eachurl);
}else{
//如果是详情页面url,加到低优先级队列中
this.repositoryService.addLowLevel(eachurl);
}
}
//page.getUrl()表示当前页,当前页如果是详情页,则存储数据
if(page.getUrl().startsWith("http://www.youku.com/show_page")){
//存储数据
this.storePageInfo(page);
}
}else{
System.out.println("队列中的电视剧url解析完毕,请等待!");
}
}
}

//下载页面方法
public Page downloadPage(String url){
return this.downLoadService.download(url);
}

//解析页面方法
public void processPage(Page page){
this.processService.process(page);
}

//存储页面信息方法
public void storePageInfo(Page page){
this.storeService.store(page);
}
public IDownLoadService getDownLoadService() {
return downLoadService;
}

public void setDownLoadService(IDownLoadService downLoadService) {
this.downLoadService = downLoadService;
}

public IProcessService getProcessService() {
return processService;
}

public void setProcessService(IProcessService processService) {
this.processService = processService;
}

 

public IStoreService getStoreService() {
return storeService;
}

 

public void setStoreService(IStoreService storeService) {
this.storeService = storeService;
}


public IRepositoryService getRepositoryService() {
return repositoryService;
}


public void setRepositoryService(IRepositoryService repositoryService) {
this.repositoryService = repositoryService;
}


}

爬虫代码实现七:实现高、低级队列循环抓取

标签:ack   tar   commons   entity   count   并且   tsp   pos   lis   

原文地址:http://www.cnblogs.com/lchzls/p/6305922.html

(0)
(0)
   
举报
评论 一句话评论(0
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!