You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2008/09/30 23:32:25 UTC
svn commit: r700588 - in /hadoop/core/branches/branch-0.19: CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
Author: cdouglas
Date: Tue Sep 30 14:32:25 2008
New Revision: 700588
URL: http://svn.apache.org/viewvc?rev=700588&view=rev
Log:
HADOOP-4281. Change dfsadmin to report available disk space in a format
consistent with the web interface as defined in HADOOP-2816. Contributed by
Suresh Srinivas
Modified:
hadoop/core/branches/branch-0.19/CHANGES.txt
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=700588&r1=700587&r2=700588&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Tue Sep 30 14:32:25 2008
@@ -81,6 +81,10 @@
HADOOP-4293. Make Configuration Writable and remove unreleased
WritableJobConf. Configuration.write is renamed to writeXml. (omalley)
+ HADOOP-4281. Change dfsadmin to report available disk space in a format
+ consistent with the web interface as defined in HADOOP-2816. (Suresh
+ Srinivas via cdouglas)
+
NEW FEATURES
HADOOP-3341. Allow streaming jobs to specify the field separator for map
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=700588&r1=700587&r2=700588&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Tue Sep 30 14:32:25 2008
@@ -155,8 +155,11 @@
public String getDatanodeReport() {
StringBuffer buffer = new StringBuffer();
long c = getCapacity();
+ long pc = getPresentCapacity();
long r = getRemaining();
long u = getDfsUsed();
+ float usedPercent = getDfsUsedPercent();
+
buffer.append("Name: "+name+"\n");
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append("Rack: "+location+"\n");
@@ -169,10 +172,11 @@
} else {
buffer.append("Normal\n");
}
- buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
- buffer.append("Remaining raw bytes: " +r+ "("+FsShell.byteDesc(r)+")"+"\n");
- buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
- buffer.append("% used: "+FsShell.limitDecimalTo2(100.0*u/(c+1e-10))+"%\n");
+ buffer.append("Configured Capacity: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
+ buffer.append("Present Capacity: "+pc+" ("+FsShell.byteDesc(pc)+")"+"\n");
+ buffer.append("DFS Remaining: " +r+ " ("+FsShell.byteDesc(r)+")"+"\n");
+ buffer.append("DFS Used: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
+ buffer.append("DFS Used%: "+FsShell.limitDecimalTo2(usedPercent)+"%\n");
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
@@ -181,6 +185,7 @@
public String dumpDatanode() {
StringBuffer buffer = new StringBuffer();
long c = getCapacity();
+ long pc = getPresentCapacity();
long r = getRemaining();
long u = getDfsUsed();
buffer.append(name);
@@ -195,6 +200,7 @@
buffer.append(" IN");
}
buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
+ buffer.append(" " + pc + "(" + FsShell.byteDesc(pc)+")");
buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
buffer.append(" " + FsShell.limitDecimalTo2(((1.0*u)/c)*100)+"%");
buffer.append(" " + r + "(" + FsShell.byteDesc(r)+")");
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=700588&r1=700587&r2=700588&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Tue Sep 30 14:32:25 2008
@@ -256,9 +256,10 @@
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) fs;
DiskStatus ds = dfs.getDiskStatus();
- long raw = ds.getCapacity();
- long rawUsed = ds.getDfsUsed();
+ long capacity = ds.getCapacity();
+ long used = ds.getDfsUsed();
long remaining = ds.getRemaining();
+ long presentCapacity = used + remaining;
boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
UpgradeStatusReport status =
dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
@@ -269,14 +270,16 @@
if (status != null) {
System.out.println(status.getStatusText(false));
}
- System.out.println("Total raw bytes: " + raw
- + " (" + byteDesc(raw) + ")");
- System.out.println("Remaining raw bytes: " + remaining
+ System.out.println("Configured Capacity: " + capacity
+ + " (" + byteDesc(capacity) + ")");
+ System.out.println("Present Capacity: " + presentCapacity
+ + " (" + byteDesc(presentCapacity) + ")");
+ System.out.println("DFS Remaining: " + remaining
+ " (" + byteDesc(remaining) + ")");
- System.out.println("Used raw bytes: " + rawUsed
- + " (" + byteDesc(rawUsed) + ")");
- System.out.println("% used: "
- + limitDecimalTo2(((1.0 * rawUsed) / raw) * 100)
+ System.out.println("DFS Used: " + used
+ + " (" + byteDesc(used) + ")");
+ System.out.println("DFS Used%: "
+ + limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
+ "%");
System.out.println();