This article collects and summarizes typical usage examples of the Java class edu.uci.ics.crawler4j.crawler.CrawlController. If you are wondering what CrawlController is for, how to use it, or what working examples look like, the curated code samples here should help.
The CrawlController class belongs to the edu.uci.ics.crawler4j.crawler package. The sections below present 15 code examples of the class, sorted by popularity by default.
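Every example below hands a WebCrawler subclass (SampleCrawler, MyCrawler, HtmlOnlyCrawler, ...) to CrawlController.start() without showing that class. As a point of reference, a minimal crawler might look like the following sketch. The shouldVisit/visit overrides are crawler4j's standard WebCrawler hooks (the two-argument shouldVisit signature is from the 4.x API); the URL filter and the domain restriction are illustrative assumptions, not taken from the examples.

import java.util.regex.Pattern;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

public class MyCrawler extends WebCrawler {

    // skip common binary/static resources (illustrative filter)
    private static final Pattern FILTERS =
            Pattern.compile(".*(\\.(css|js|gif|jpe?g|png|mp3|mp4|zip|gz))$");

    @Override
    public boolean shouldVisit(Page referringPage, WebURL url) {
        String href = url.getURL().toLowerCase();
        // follow only non-binary links inside an example domain (assumed restriction)
        return !FILTERS.matcher(href).matches()
                && href.startsWith("https://www.ics.uci.edu/");
    }

    @Override
    public void visit(Page page) {
        String url = page.getWebURL().getURL();
        if (page.getParseData() instanceof HtmlParseData) {
            HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
            // page text, HTML and outgoing links are available here for further processing
            System.out.println(url + " -> " + htmlParseData.getOutgoingUrls().size() + " outgoing links");
        }
    }
}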
Example 1: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
int numberOfCrawlers = 2;
CrawlConfig config = new CrawlConfig();
String crawlStorageFolder = "data";
config.setCrawlStorageFolder(crawlStorageFolder);
config.setPolitenessDelay(500);
config.setMaxDepthOfCrawling(2);
config.setMaxPagesToFetch(20);
config.setIncludeBinaryContentInCrawling(false);
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
controller.addSeed("https://en.wikipedia.org/wiki/Bishop_Rock,_Isles_of_Scilly");
controller.start(SampleCrawler.class, numberOfCrawlers);
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 21, Source: CrawlerController.java
Example 2: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
String crawlStorageFolder = "/data/crawl/root";
int numberOfCrawlers = 7;
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(crawlStorageFolder);
config.setPolitenessDelay(500);
config.setMaxDepthOfCrawling(2);
config.setMaxPagesToFetch(1000);
config.setResumableCrawling(false);
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController crawlController = new CrawlController(config, pageFetcher, robotstxtServer);
crawlController.addSeed("http://www.11st.co.kr/html/main.html");
crawlController.addSeed("http://www.11st.co.kr/html/category/1930.html");
crawlController.start(MyCrawler.class, numberOfCrawlers);
}
Example 3: crawlAndImport
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
/**
* This is where everything happens!
*/
private void crawlAndImport() throws Exception {
CrawlConfig crawlConfig = buildCrawlConfig();
PageFetcher pageFetcher = new PageFetcher(crawlConfig);
RobotstxtConfig robotsTxtConfig = new RobotstxtConfig();
robotsTxtConfig.setEnabled(appConfig.isRespectRobotsTxt());
RobotstxtServer robotsTxtServer = new RobotstxtServer(robotsTxtConfig, pageFetcher);
CrawlController crawlController = new CrawlController(crawlConfig, pageFetcher, robotsTxtServer);
// "dependency injection" into crawlers
Object[] customData = new Object[] { appConfig, graphImporter };
crawlController.setCustomData(customData);
addSeedUrls(crawlController);
logger.info("Start crawling");
/*
* Start the crawl. This is a blocking operation, meaning that your code will reach the line after this only
* when crawling is finished.
*/
crawlController.start(HtmlOnlyCrawler.class, appConfig.getNumberOfCrawlers());
logger.info("Finished crawling");
}
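The custom data attached via setCustomData above is normally read back inside the crawler through its controller. Below is a minimal sketch of the receiving side: getMyController() and getCustomData() are real crawler4j 4.x methods (deprecated in later releases in favor of a WebCrawlerFactory), while the AppConfig and GraphImporter class names are only assumptions inferred from the field names used in crawlAndImport().

// sketch of the receiving side inside the HtmlOnlyCrawler subclass
private AppConfig appConfig;
private GraphImporter graphImporter;

@Override
public void onStart() {
    // unpack the array in the same order it was packed by crawlAndImport()
    Object[] customData = (Object[]) getMyController().getCustomData();
    this.appConfig = (AppConfig) customData[0];
    this.graphImporter = (GraphImporter) customData[1];
}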
Example 4: init
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
/**
* Initialization
*
* @param numberOfCrawlers
* number of crawler threads
* @param maxDepthOfCrawling
* maximum crawl depth
* @param maxPagesToFetch
* maximum number of pages to fetch
* @param politenessDelay
* politeness delay
* @param links
* seed links to crawl
*/
public void init(int numberOfCrawlers, int maxDepthOfCrawling, int maxPagesToFetch, int politenessDelay,
String[] links) {
this.numberOfCrawlers = numberOfCrawlers;
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(DefaultConfigValues.CRAWL_STORAGE_FOLDER);
config.setMaxDepthOfCrawling(maxDepthOfCrawling);
config.setIncludeHttpsPages(true);
config.setMaxPagesToFetch(maxPagesToFetch);
config.setIncludeBinaryContentInCrawling(false);
config.setPolitenessDelay(politenessDelay);
config.setUserAgentString(DefaultConfigValues.USER_AGENT);
config.setResumableCrawling(true);
if (com.zhazhapan.vspider.models.CrawlConfig.getTurnOnProxy().get()) {
logger.info("open proxy");
config.setProxyHost(com.zhazhapan.vspider.models.CrawlConfig.getProxyServer().get());
config.setProxyPort(Formatter.stringToInt(com.zhazhapan.vspider.models.CrawlConfig.getProxyPort().get()));
config.setProxyUsername(com.zhazhapan.vspider.models.CrawlConfig.getProxyUser().get());
config.setProxyPassword(com.zhazhapan.vspider.models.CrawlConfig.getProxyPass().get());
}
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
robotstxtConfig.setEnabled(false);
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
try {
controller = new CrawlController(config, pageFetcher, robotstxtServer);
for (String link : links) {
if (Checker.isHyperLink(link)) {
controller.addSeed(link);
}
}
isInited = true;
} catch (Exception e) {
logger.error("start to crawl urls error: " + e.getMessage());
}
}
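For illustration, the initializer above could be driven as shown below. The VSpider class name and the seed URLs are assumptions made for this sketch; the argument order follows the Javadoc (threads, depth, max pages, politeness delay, seed links).

// hypothetical caller of init(): 4 crawler threads, depth 3, at most 500 pages, 1000 ms delay
VSpider spider = new VSpider();
spider.init(4, 3, 500, 1000, new String[] {
        "https://example.com/",
        "https://example.org/news/"
});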
Example 5: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
String crawlStorageFolder = "/data/crawl/root";
int numberOfCrawlers = 7;
CrawlConfig config = new CrawlConfig();
config.setPolitenessDelay(100);
config.setCrawlStorageFolder(crawlStorageFolder);
/*
* Instantiate the controller for this crawl.
*/
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
/*
* For each crawl, you need to add some seed urls. These are the first
* URLs that are fetched and then the crawler starts following links
* which are found in these pages
*/
controller.addSeed("https://de.wikipedia.org/wiki/Java_Database_Connectivity");
controller.addSeed("https://de.wikipedia.org/wiki/Relationale_Datenbank");
controller.addSeed("https://pt.wikipedia.org/wiki/JDBC");
controller.addSeed("https://pt.wikipedia.org/wiki/Protocolo");
controller.addSeed("https://de.wikipedia.org/wiki/Datenbank");
/*
* Start the crawl. This is a blocking operation, meaning that your code
* will reach the line after this only when crawling is finished.
*/
controller.start(new PostgresCrawlerFactory("jdbc:postgresql://localhost/crawler4j","postgres","postgres"), numberOfCrawlers);
}
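Unlike the other examples, this one passes a factory rather than a Class object to start(), which lets each crawler instance receive constructor arguments (here the JDBC URL and credentials). A sketch of what such a factory might look like is below; CrawlController.WebCrawlerFactory is the callback interface used by recent crawler4j 4.x releases, while the PostgresCrawler(Connection) constructor is an assumption about the project's own crawler class.

import java.sql.Connection;
import java.sql.DriverManager;

import edu.uci.ics.crawler4j.crawler.CrawlController;

public class PostgresCrawlerFactory implements CrawlController.WebCrawlerFactory<PostgresCrawler> {

    private final String jdbcUrl;
    private final String user;
    private final String password;

    public PostgresCrawlerFactory(String jdbcUrl, String user, String password) {
        this.jdbcUrl = jdbcUrl;
        this.user = user;
        this.password = password;
    }

    @Override
    public PostgresCrawler newInstance() throws Exception {
        // give each crawler thread its own JDBC connection (assumed PostgresCrawler(Connection) constructor)
        Connection connection = DriverManager.getConnection(jdbcUrl, user, password);
        return new PostgresCrawler(connection);
    }
}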
Example 6: run
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public void run(final CrawlerSettings crawlerSettings, final List<Memo> memos) throws Exception {
CrawlConfig config = crawlerSettings.getCrawlConfig();
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
for (String seed : crawlerSettings.getSeeds()) {
controller.addSeed(seed);
}
ActionsCrawler.configure(memoEntryFinder, memoMatching, servicesContext, memos);
controller.start(ActionsCrawler.class, crawlerSettings.getNumberOfCrawlers());
}
Example 7: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length != 1) {
return;
}
String crawlStorageFolder = args[0];
int numberOfCrawlers = 1;
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(crawlStorageFolder);
/*
* Instantiate the controller for this crawl.
*/
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
/*
* For each crawl, you need to add some seed urls. These are the first
* URLs that are fetched and then the crawler starts following links
* which are found in these pages
*/
controller.addSeed("http://www.senado.leg.br/senadores/default.asp");
/*
* Start the crawl. This is a blocking operation, meaning that your code
* will reach the line after this only when crawling is finished.
*/
controller.start(SenatorsCrawler.class, numberOfCrawlers);
}
Example 8: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
String crawlStorageFolder = "D:\\etc\\storage";
int numberOfCrawlers = 7;
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(crawlStorageFolder);
/*
* Instantiate the controller for this crawl.
*/
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
/*
* For each crawl, you need to add some seed urls. These are the first
* URLs that are fetched and then the crawler starts following links
* which are found in these pages
*/
controller.addSeed("http://www.ics.uci.edu/~lopes/");
controller.addSeed("http://www.ics.uci.edu/~welling/");
controller.addSeed("http://www.ics.uci.edu/");
/*
* Start the crawl. This is a blocking operation, meaning that your code
* will reach the line after this only when crawling is finished.
*/
controller.start(MyCrawler.class, numberOfCrawlers);
}
Example 9: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length < 3) {
System.out.println("Needed parameters: ");
System.out.println("\t rootFolder (it will contain intermediate crawl data)");
System.out.println("\t numberOfCralwers (number of concurrent threads)");
System.out.println("\t storageFolder (a folder for storing downloaded images)");
return;
}
String rootFolder = args[0];
int numberOfCrawlers = Integer.parseInt(args[1]);
String storageFolder = args[2];
CrawlConfig config = new CrawlConfig();
config.crawlStorageFolder_$eq(rootFolder);
/*
* Since images are binary content, we need to set this parameter to
* true to make sure they are included in the crawl.
*/
config.includeBinaryContentInCrawling_$eq(true);
String[] crawlDomains = new String[] { "http://uci.edu/" };
PageFetcher pageFetcher = new PageFetcher(config);
RobotsTxtConfig robotstxtConfig = new RobotsTxtConfig();
RobotsTxtServer robotstxtServer = new RobotsTxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
for (String domain : crawlDomains) {
controller.addSeed(domain);
}
ImageCrawler.configure(crawlDomains, storageFolder);
controller.start(ImageCrawler.class, numberOfCrawlers);
}
Example 10: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length != 2) {
System.out.println("Needed parameters: ");
System.out.println("\t rootFolder (it will contain intermediate crawl data)");
System.out.println("\t numberOfCralwers (number of concurrent threads)");
return;
}
String rootFolder = args[0];
int numberOfCrawlers = Integer.parseInt(args[1]);
CrawlConfig config = new CrawlConfig();
config.crawlStorageFolder_$eq(rootFolder);
config.maxPagesToFetch_$eq(10);
config.politenessDelay_$eq(1000);
PageFetcher pageFetcher = new PageFetcher(config);
RobotsTxtConfig robotstxtConfig = new RobotsTxtConfig();
RobotsTxtServer robotstxtServer = new RobotsTxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
controller.addSeed("http://www.ics.uci.edu/");
controller.start(LocalDataCollectorCrawler.class, numberOfCrawlers);
List<Object> crawlersLocalData = controller.getCrawlersLocalData();
long totalLinks = 0;
long totalTextSize = 0;
int totalProcessedPages = 0;
for (Object localData : crawlersLocalData) {
CrawlStat stat = (CrawlStat) localData;
totalLinks += stat.getTotalLinks();
totalTextSize += stat.getTotalTextSize();
totalProcessedPages += stat.getTotalProcessedPages();
}
System.out.println("Aggregated Statistics:");
System.out.println(" Processed Pages: " + totalProcessedPages);
System.out.println(" Total Links found: " + totalLinks);
System.out.println(" Total Text Size: " + totalTextSize);
}
Example 11: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length < 3) {
System.out.println("Needed parameters: ");
System.out.println("\t rootFolder (it will contain intermediate crawl data)");
System.out.println("\t numberOfCralwers (number of concurrent threads)");
System.out.println("\t storageFolder (a folder for storing downloaded images)");
return;
}
String rootFolder = args[0];
int numberOfCrawlers = Integer.parseInt(args[1]);
String storageFolder = args[2];
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(rootFolder);
/*
* Since images are binary content, we need to set this parameter to
* true to make sure they are included in the crawl.
*/
config.setIncludeBinaryContentInCrawling(true);
String[] crawlDomains = new String[] { "http://uci.edu/" };
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
for (String domain : crawlDomains) {
controller.addSeed(domain);
}
ImageCrawler.configure(crawlDomains, storageFolder);
controller.start(ImageCrawler.class, numberOfCrawlers);
}
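Both image-crawler examples enable binary content and then call a static ImageCrawler.configure(...) that is not shown. A sketch of what that crawler might look like is below; BinaryParseData and Page.getContentData() are real crawler4j APIs, while the configure() helper, the image-extension filter, and the storage handling are assumptions made for illustration.

import java.util.regex.Pattern;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.BinaryParseData;
import edu.uci.ics.crawler4j.url.WebURL;

public class ImageCrawler extends WebCrawler {

    private static final Pattern IMG_PATTERN = Pattern.compile(".*(\\.(bmp|gif|jpe?g|png))$");

    private static String[] crawlDomains;
    private static String storageFolder;

    // mirrors the static configure(...) call used in the example above
    public static void configure(String[] domains, String storageFolderName) {
        crawlDomains = domains;
        storageFolder = storageFolderName;
    }

    @Override
    public boolean shouldVisit(Page referringPage, WebURL url) {
        String href = url.getURL().toLowerCase();
        if (IMG_PATTERN.matcher(href).matches()) {
            return true; // always fetch image URLs
        }
        for (String domain : crawlDomains) {
            if (href.startsWith(domain)) {
                return true; // follow HTML pages inside the configured domains
            }
        }
        return false;
    }

    @Override
    public void visit(Page page) {
        // binary pages carry their raw bytes in getContentData()
        if (page.getParseData() instanceof BinaryParseData
                && IMG_PATTERN.matcher(page.getWebURL().getURL().toLowerCase()).matches()) {
            byte[] imageBytes = page.getContentData();
            // writing imageBytes into storageFolder is left out of this sketch
        }
    }
}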
Example 12: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length != 2) {
System.out.println("Needed parameters: ");
System.out.println("\t rootFolder (it will contain intermediate crawl data)");
System.out.println("\t numberOfCralwers (number of concurrent threads)");
return;
}
String rootFolder = args[0];
int numberOfCrawlers = Integer.parseInt(args[1]);
CrawlConfig config = new CrawlConfig();
config.setCrawlStorageFolder(rootFolder);
config.setMaxPagesToFetch(10);
config.setPolitenessDelay(1000);
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
controller.addSeed("http://www.ics.uci.edu/");
controller.start(LocalDataCollectorCrawler.class, numberOfCrawlers);
List<Object> crawlersLocalData = controller.getCrawlersLocalData();
long totalLinks = 0;
long totalTextSize = 0;
int totalProcessedPages = 0;
for (Object localData : crawlersLocalData) {
CrawlStat stat = (CrawlStat) localData;
totalLinks += stat.getTotalLinks();
totalTextSize += stat.getTotalTextSize();
totalProcessedPages += stat.getTotalProcessedPages();
}
System.out.println("Aggregated Statistics:");
System.out.println(" Processed Pages: " + totalProcessedPages);
System.out.println(" Total Links found: " + totalLinks);
System.out.println(" Total Text Size: " + totalTextSize);
}
Example 13: addSeedUrls
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
/**
* For each crawl, we need to add some seed URLs. These are the first URLs that are fetched and then the crawler
* starts following links which are found in these pages.
*/
private void addSeedUrls(CrawlController crawlController) {
String[] seedUrls = appConfig.getSeedUrls();
for (int i = 0; i < seedUrls.length; i++) {
crawlController.addSeed(seedUrls[i]);
}
}
Example 14: execute
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void execute() throws Exception {
urlMap = GetURL.getAllUrl();
String crawlStorageFolder = "/data/crawl/root";
// number of crawler threads
int numberOfCrawlers = 2;
CrawlConfig config = new CrawlConfig();
// folder for storing intermediate crawl data
config.setCrawlStorageFolder(crawlStorageFolder);
// maximum crawl depth
config.setMaxDepthOfCrawling(0);
// whether pages with binary content should be crawled
config.setIncludeBinaryContentInCrawling(false);
// to avoid overloading the target site, wait 200 ms (the default) between requests to the same host
config.setPolitenessDelay(200);
// resumable crawling
//config.setResumableCrawling(true);
// initialize the crawler components
PageFetcher pageFetcher = new PageFetcher(config);
RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
/*
* For each crawl, seed pages are added first and the crawler then follows
* the links found in those pages. Here the URLs to crawl are read from the
* database and added as seeds.
*/
//note: check whether map.values and map.keySet iterate in the same order
for (String url : urlMap.keySet()) {
controller.addSeed(url);
}
/*
* Start the crawl in non-blocking mode: startNonBlocking returns immediately,
* and waitUntilFinish() below blocks until crawling is finished.
*/
controller.startNonBlocking(MyCrawler.class, numberOfCrawlers);
// wait one second, to avoid the site blocking the crawler for fetching too aggressively
Thread.sleep(1000);
controller.waitUntilFinish();
}
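Besides sleeping and then waiting as above, a non-blocking crawl can also be stopped explicitly: CrawlController exposes shutdown(), after which waitUntilFinish() returns once the crawler threads have wound down. A small sketch of that pattern (the 30-second budget is an arbitrary illustration):

controller.startNonBlocking(MyCrawler.class, numberOfCrawlers);
// let the crawl run for a bounded amount of time, then stop it gracefully
Thread.sleep(30_000);
controller.shutdown();        // ask the crawler threads to finish their current work
controller.waitUntilFinish(); // block until they have actually terminated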
Example 15: main
import edu.uci.ics.crawler4j.crawler.CrawlController; // import the required package/class
public static void main(String[] args) throws Exception {
if (args.length != 1) {
System.out.println("Needed parameter: ");
System.out.println("\t rootFolder (it will contain intermediate crawl data)");
return;
}
/*
* crawlStorageFolder is a folder where intermediate crawl data is
* stored.
*/
String crawlStorageFolder = args[0];
CrawlConfig config1 = new CrawlConfig();
CrawlConfig config2 = new CrawlConfig();
/*
* The two crawlers should have different storage folders for their
* intermediate data
*/
config1.crawlStorageFolder_$eq(crawlStorageFolder + "/crawler1");
config2.crawlStorageFolder_$eq(crawlStorageFolder + "/crawler2");
config1.politenessDelay_$eq(1000);
config2.politenessDelay_$eq(2000);
config1.maxPagesToFetch_$eq(50);
config2.maxPagesToFetch_$eq(100);
/*
* We will use different PageFetchers for the two crawlers.
*/
PageFetcher pageFetcher1 = new PageFetcher(config1);
PageFetcher pageFetcher2 = new PageFetcher(config2);
/*
* We will use the same RobotstxtServer for both of the crawlers.
*/
RobotsTxtConfig robotstxtConfig = new RobotsTxtConfig();
RobotsTxtServer robotstxtServer = new RobotsTxtServer(robotstxtConfig, pageFetcher1);
CrawlController controller1 = new CrawlController(config1, pageFetcher1, robotstxtServer);
CrawlController controller2 = new CrawlController(config2, pageFetcher2, robotstxtServer);
String[] crawler1Domains = new String[] { "http://www.ics.uci.edu/", "http://www.cnn.com/" };
String[] crawler2Domains = new String[] { "http://en.wikipedia.org/" };
controller1.setCustomData(crawler1Domains);
controller2.setCustomData(crawler2Domains);
controller1.addSeed("http://www.ics.uci.edu/");
controller1.addSeed("http://www.cnn.com/");
controller1.addSeed("http://www.ics.uci.edu/~lopes/");
controller1.addSeed("http://www.cnn.com/POLITICS/");
controller2.addSeed("http://en.wikipedia.org/wiki/Main_Page");
controller2.addSeed("http://en.wikipedia.org/wiki/Obama");
controller2.addSeed("http://en.wikipedia.org/wiki/Bing");
/*
* The first crawler will have 5 concurrent threads and the second
* crawler will have 7 threads.
*/
controller1.startNonBlocking(BasicCrawler.class, 5);
controller2.startNonBlocking(BasicCrawler.class, 7);
controller1.waitUntilFinish();
System.out.println("Crawler 1 is finished.");
controller2.waitUntilFinish();
System.out.println("Crawler 2 is finished.");
}