Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2007/09/13 23:22:34 UTC

svn commit: r575460 - in /lucene/hadoop/trunk: CHANGES.txt src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

Author: dhruba
Date: Thu Sep 13 14:22:33 2007
New Revision: 575460

URL: http://svn.apache.org/viewvc?rev=575460&view=rev
Log:
HADOOP-1890. Removed debugging prints introduced by HADOOP-1774.
(Raghu Angadi via dhruba)


Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=575460&r1=575459&r2=575460&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Thu Sep 13 14:22:33 2007
@@ -81,6 +81,9 @@
 
   BUG FIXES
 
+    HADOOP-1890. Removed debugging prints introduced by HADOOP-1774.
+    (Raghu Angadi via dhruba)
+
     HADOOP-1763. Too many lost task trackers on large clusters due to
     insufficient number of RPC handler threads on the JobTracker.
     (Devaraj Das)

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?rev=575460&r1=575459&r2=575460&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Thu Sep 13 14:22:33 2007
@@ -26,7 +26,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.fs.Command;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.ToolRunner;
@@ -125,9 +124,6 @@
     // Format and clean out DataNode directories
     if (format) {
       if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
-        String[] cmd = { "find", data_dir.toString() };
-        String reply = Command.execCommand(cmd);
-        System.err.print("Reply from find : " + reply);
         throw new IOException("Cannot remove data directory: " + data_dir);
       }
       NameNode.format(conf);
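
For reference, a minimal sketch of what the format-and-clean path in MiniDFSCluster.java looks like after this change, reconstructed only from the diff context above. The data_dir, format, and conf variables are assumed to come from the surrounding class, which is not shown here; this is an illustrative sketch, not the full method.

    // Format and clean out DataNode directories (post-patch sketch; fields assumed from MiniDFSCluster)
    if (format) {
      // FileUtil.fullyDelete returns false when the directory could not be removed.
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        // The debugging "find" output added by HADOOP-1774 is gone; only the exception remains.
        throw new IOException("Cannot remove data directory: " + data_dir);
      }
      NameNode.format(conf);
    }

With the Command.execCommand call removed, the unused org.apache.hadoop.fs.Command import is dropped as well, as shown in the first hunk.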