Posted to user@nutch.apache.org by Ali Nazemian <al...@gmail.com> on 2014/06/24 12:17:13 UTC

Incremental web crawling based on number of web pages

Hi,
I am going to change the Crawler class so that it can crawl incrementally
based on the number of web pages. Suppose the total number of pages for a
crawl of depth 2 is around 5000. Right now this class runs
generate-fetch-update over all of those pages and, after finishing, sends them
to Solr for indexing. I want to change the class so that it breaks these
5000 pages into 10 separate generate-fetch-update cycles. Is that
possible with Nutch? If so, how can I do that?

Crawler source:

public class Crawler extends Configured implements Tool {

  public static final Logger LOG = LoggerFactory.getLogger(Crawler.class);

  private static String getDate() {
    return new SimpleDateFormat("yyyyMMddHHmmss").format(new Date(System
        .currentTimeMillis()));
  }

  /*
   * Perform complete crawling and indexing (to Solr) given a set of root urls
   * and the -solr parameter respectively. More information and Usage
   * parameters can be found below.
   */
  public static void main(String args[]) throws Exception {
    Configuration conf = NutchConfiguration.create();
    int res = ToolRunner.run(conf, new Crawler(), args);
    System.exit(res);
  }

  @Override
  public int run(String[] args) throws Exception {
    if (args.length < 1) {
      System.out
          .println("Usage: Crawl <urlDir> -solr <solrURL> [-dir d] [-threads n] [-depth i] [-topN N]");
      return -1;
    }
    Path rootUrlDir = null;
    Path dir = new Path("crawl-" + getDate());
    int threads = getConf().getInt("fetcher.threads.fetch", 10);
    int depth = 5;
    long topN = Long.MAX_VALUE;
    String solrUrl = null;

    for (int i = 0; i < args.length; i++) {
      if ("-dir".equals(args[i])) {
        dir = new Path(args[i + 1]);
        i++;
      } else if ("-threads".equals(args[i])) {
        threads = Integer.parseInt(args[i + 1]);
        i++;
      } else if ("-depth".equals(args[i])) {
        depth = Integer.parseInt(args[i + 1]);
        i++;
      } else if ("-topN".equals(args[i])) {
        topN = Integer.parseInt(args[i + 1]);
        i++;
      } else if ("-solr".equals(args[i])) {
        solrUrl = args[i + 1];
        i++;
      } else if (args[i] != null) {
        rootUrlDir = new Path(args[i]);
      }
    }

    JobConf job = new NutchJob(getConf());

    if (solrUrl == null) {
      LOG.warn("solrUrl is not set, indexing will be skipped...");
    } else {
      // for simplicity assume that SOLR is used
      // and pass its URL via conf
      getConf().set("solr.server.url", solrUrl);
    }

    FileSystem fs = FileSystem.get(job);

    if (LOG.isInfoEnabled()) {
      LOG.info("crawl started in: " + dir);
      LOG.info("rootUrlDir = " + rootUrlDir);
      LOG.info("threads = " + threads);
      LOG.info("depth = " + depth);
      LOG.info("solrUrl=" + solrUrl);
      if (topN != Long.MAX_VALUE)
        LOG.info("topN = " + topN);
    }

    Path crawlDb = new Path(dir + "/crawldb");
    Path linkDb = new Path(dir + "/linkdb");
    Path segments = new Path(dir + "/segments");

    // Path tmpDir = job.getLocalPath("crawl" + Path.SEPARATOR + getDate());
    Injector injector = new Injector(getConf());
    Generator generator = new Generator(getConf());
    Fetcher fetcher = new Fetcher(getConf());
    ParseSegment parseSegment = new ParseSegment(getConf());
    CrawlDb crawlDbTool = new CrawlDb(getConf());
    LinkDb linkDbTool = new LinkDb(getConf());

    // initialize crawlDb
    injector.inject(crawlDb, rootUrlDir);
    int i;
    for (i = 0; i < depth; i++) { // generate new segment
      Path[] segs = generator.generate(crawlDb, segments, -1, topN,
          System.currentTimeMillis());
      if (segs == null) {
        LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
        break;
      }
      fetcher.fetch(segs[0], threads); // fetch it
      if (!Fetcher.isParsing(job)) {
        parseSegment.parse(segs[0]); // parse it, if needed
      }
      crawlDbTool.update(crawlDb, segs, true, true); // update crawldb
    }
    if (i > 0) {
      linkDbTool.invert(linkDb, segments, true, true, false); // invert links
      // dedup should be added

      if (solrUrl != null) {
        // index
        FileStatus[] fstats = fs.listStatus(segments,
            HadoopFSUtil.getPassDirectoriesFilter(fs));

        IndexingJob indexer = new IndexingJob(getConf());
        boolean noCommit = false;
        indexer.index(crawlDb, linkDb,
            Arrays.asList(HadoopFSUtil.getPaths(fstats)), noCommit);
      }
      // merge should be added
      // clean should be added
    } else {
      LOG.warn("No URLs to fetch - check your seed list and URL filters.");
    }
    if (LOG.isInfoEnabled()) {
      LOG.info("crawl finished: " + dir);
    }
    return 0;
  }

}

Best regards.
-- 
A.Nazemian

Re: Incremental web crawling based on number of web pages

Posted by Sebastian Nagel <wa...@googlemail.com>.
Hi Ali,

> break this 5000 pages to 10 different generate-fetch-update cycle

This can be done without writing a single line of Java code:

bin/nutch generate ... -maxNumSegments 10

will do the job; see http://wiki.apache.org/nutch/bin/nutch%20generate.
You'll then get up to 10 segments, and for each of them you have to run fetch - parse - updatedb.
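
A minimal sketch of that per-segment loop, assuming a crawl/ directory layout and a segments directory that holds only the segments generated in this round (the topN value and thread count are placeholders):

CRAWLDB=crawl/crawldb
SEGMENTS=crawl/segments

# one pass over the crawldb, split into at most 10 segments
bin/nutch generate "$CRAWLDB" "$SEGMENTS" -topN 5000 -maxNumSegments 10

# run fetch - parse - updatedb once per generated segment
for SEG in "$SEGMENTS"/*; do
  bin/nutch fetch "$SEG" -threads 10
  bin/nutch parse "$SEG"
  bin/nutch updatedb "$CRAWLDB" "$SEG"
done

If you do want to do the same thing in Java instead, note that the posted Crawler only processes segs[0] of whatever the Generator returns, so it would have to loop over all returned segments in the same way.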

Sebastian
