Posted to commits@nutch.apache.org by sn...@apache.org on 2015/03/31 21:28:14 UTC

svn commit: r1670442 - /nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java

Author: snagel
Date: Tue Mar 31 19:28:14 2015
New Revision: 1670442

URL: http://svn.apache.org/r1670442
Log:
NUTCH-1979 CrawlDbReader to implement Tool: fix unit test

Modified:
    nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java
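For readers not following NUTCH-1979: implementing org.apache.hadoop.util.Tool is the standard Hadoop idiom for jobs driven from the command line through ToolRunner, which injects the Configuration and handles generic options. The sketch below shows only that general pattern; the class name and body are illustrative and are not the actual CrawlDbReader code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Illustrative only: a minimal Tool implementation, not the real CrawlDbReader.
public class ExampleDbTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf(); // configuration injected by ToolRunner
    // ... set up and run the job here ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new ExampleDbTool(), args));
  }
}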

Modified: nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java
URL: http://svn.apache.org/viewvc/nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java?rev=1670442&r1=1670441&r2=1670442&view=diff
==============================================================================
--- nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java (original)
+++ nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java Tue Mar 31 19:28:14 2015
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.nutch.util.NutchConfiguration;
 import org.junit.After;
 import org.junit.Assert;
@@ -113,7 +114,7 @@ public class TestCrawlDbMerger {
       String url = it.next();
       LOG.fine("url=" + url);
       CrawlDatum cd = expected.get(url);
-      CrawlDatum res = reader.get(crawlDb, url, conf);
+      CrawlDatum res = reader.get(crawlDb, url, new JobConf(conf));
       LOG.fine(" -> " + res);
       System.out.println("url=" + url);
       System.out.println(" cd " + cd);
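For context only (not part of the commit): the one-line fix wraps the test's plain Configuration in a JobConf via the JobConf(Configuration) constructor, presumably because the reader's get() expects a JobConf after the Tool change. Below is a minimal, self-contained sketch of that call pattern; the crawl-db location, the URL, and the assumption that get() takes the location as a String are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.nutch.crawl.CrawlDatum;
import org.apache.nutch.crawl.CrawlDbReader;
import org.apache.nutch.util.NutchConfiguration;

public class CrawlDbGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    CrawlDbReader reader = new CrawlDbReader();
    // Wrap the Configuration in a JobConf, mirroring the one-line change above.
    // "crawldb" and the URL are placeholder values.
    CrawlDatum datum = reader.get("crawldb", "http://example.org/", new JobConf(conf));
    System.out.println("url=http://example.org/ -> " + datum);
  }
}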



Re: svn commit: r1670442 - /nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java

Posted by "Mattmann, Chris A (3980)" <ch...@jpl.nasa.gov>.
Nice work gents!

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Chris Mattmann, Ph.D.
Chief Architect
Instrument Software and Science Data Systems Section (398)
NASA Jet Propulsion Laboratory Pasadena, CA 91109 USA
Office: 168-519, Mailstop: 168-527
Email: chris.a.mattmann@nasa.gov
WWW:  http://sunset.usc.edu/~mattmann/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Adjunct Associate Professor, Computer Science Department
University of Southern California, Los Angeles, CA 90089 USA
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

-----Original Message-----
From: "snagel@apache.org" <sn...@apache.org>
Reply-To: "dev@nutch.apache.org" <de...@nutch.apache.org>
Date: Tuesday, March 31, 2015 at 12:28 PM
To: "commits@nutch.apache.org" <co...@nutch.apache.org>
Subject: svn commit: r1670442 - /nutch/trunk/src/test/org/apache/nutch/crawl/TestCrawlDbMerger.java
