Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/03/30 00:39:24 UTC
svn commit: r389916 - in /lucene/hadoop/trunk: conf/ src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/fs/ src/test/org/apache/hadoop/dfs/
Author: cutting
Date: Wed Mar 29 14:39:23 2006
New Revision: 389916
URL: http://svn.apache.org/viewcvs?rev=389916&view=rev
Log:
Fix HADOOP-33. Avoid calling df too frequently by caching values internally. Contributed by Konstantin Shvachko.
Modified:
lucene/hadoop/trunk/conf/hadoop-default.xml
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java
lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
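For context on the fix itself: the log message above describes a time-based cache
around the 'df' shell-out. Below is a minimal standalone sketch of that pattern,
with simplified names; it is not the committed code (see the DF.java hunks further
down for that), just an illustration of the technique.

  import java.io.IOException;

  /** Sketch of the refresh-interval caching pattern this commit introduces
   *  in DF.java. Names are illustrative, not the committed identifiers. */
  class CachedStat {
    private final long refreshIntervalMsec; // e.g. 3000, per dfs.df.interval
    private long lastRefresh;               // last time the work actually ran
    private long cachedValue;               // the expensive-to-obtain statistic

    CachedStat(long refreshIntervalMsec) {
      this.refreshIntervalMsec = refreshIntervalMsec;
      // Start "expired" so the first accessor call forces a refresh.
      this.lastRefresh = -refreshIntervalMsec;
    }

    long getValue() throws IOException {
      maybeRefresh();
      return cachedValue;
    }

    private void maybeRefresh() throws IOException {
      if (lastRefresh + refreshIntervalMsec > System.currentTimeMillis())
        return;                             // cache still fresh; skip the work
      cachedValue = computeExpensively();   // in DF.java: fork and parse 'df -k'
      lastRefresh = System.currentTimeMillis();
    }

    private long computeExpensively() throws IOException {
      return 0; // stand-in for the real work (running and parsing a subprocess)
    }
  }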
Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/conf/hadoop-default.xml?rev=389916&r1=389915&r2=389916&view=diff
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Wed Mar 29 14:39:23 2006
@@ -97,6 +97,12 @@
cluster.</description>
</property>
+<property>
+ <name>dfs.df.interval</name>
+ <value>3000</value>
+ <description>Disk usage statistics refresh interval in msec.</description>
+</property>
+
<!-- map/reduce properties -->
<property>
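The dfs.df.interval property added above is consumed through the standard
Configuration API, as the new DF(String, Configuration) constructor further down
shows. A short usage sketch (the property key and the 3000 ms default come
straight from this commit; the path and surrounding scaffolding are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.DF;

  public class DfIntervalExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration(); // loads hadoop-default.xml
      // Equivalent to what the new DF(path, conf) constructor does internally:
      long interval = conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT);
      DF diskUsage = new DF(".", interval);     // constructor runs 'df' once
      System.out.println(diskUsage);            // prints the cached statistics
    }
  }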
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?rev=389916&r1=389915&r2=389916&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Wed Mar 29 14:39:23 2006
@@ -182,7 +182,7 @@
//
//////////////////////////////////////////////////////
- String dirpath = null;
+ DF diskUsage;
File data = null, tmp = null;
long reserved = 0;
FSDir dirTree;
@@ -192,7 +192,7 @@
* An FSDataset has a directory where it loads its data files.
*/
public FSDataset(File dir, Configuration conf) throws IOException {
- this.dirpath = dir.getCanonicalPath();
+ diskUsage = new DF( dir.getCanonicalPath(), conf);
this.data = new File(dir, "data");
if (! data.exists()) {
data.mkdirs();
@@ -209,14 +209,14 @@
* Return total capacity, used and unused
*/
public long getCapacity() throws IOException {
- return new DF(dirpath).getCapacity();
+ return diskUsage.getCapacity();
}
/**
* Return how many bytes can still be stored in the FSDataset
*/
public long getRemaining() throws IOException {
- return ((long) Math.round(USABLE_DISK_PCT * new DF(dirpath).getAvailable())) - reserved;
+ return ((long) Math.round(USABLE_DISK_PCT * diskUsage.getAvailable())) - reserved;
}
/**
@@ -397,7 +397,7 @@
for (int i = 0; i < invalidBlks.length; i++) {
File f = getFile(invalidBlks[i]);
- long len = f.length();
+ // long len = f.length();
if (!f.delete()) {
throw new IOException("Unexpected error trying to delete block " + invalidBlks[i] + " at file " + f);
}
@@ -422,7 +422,7 @@
public String toString() {
return "FSDataset{" +
- "dirpath='" + dirpath + "'" +
+ "dirpath='" + diskUsage.getDirPath() + "'" +
"}";
}
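One detail worth noting in the getRemaining() hunk above: USABLE_DISK_PCT is
defined elsewhere in FSDataset.java and is not part of this diff. A
self-contained arithmetic sketch, assuming a value of 0.98 purely for
illustration (a double is used here so Math.round() does 64-bit rounding):

  public class RemainingExample {
    public static void main(String[] args) {
      final double USABLE_DISK_PCT = 0.98;        // assumed for illustration
      long available = 100L * 1024 * 1024 * 1024; // say df reports 100 GB free
      long reserved  =   1L * 1024 * 1024 * 1024; // 1 GB held back
      long remaining = Math.round(USABLE_DISK_PCT * available) - reserved;
      System.out.println(remaining);              // 104152956928 bytes, ~97 GB
    }
  }

The substantive change, though, is that diskUsage.getAvailable() now reads a
cached figure instead of forking a fresh 'df' process on every call, which is
exactly what HADOOP-33 complained about.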
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java?rev=389916&r1=389915&r2=389916&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java Wed Mar 29 14:39:23 2006
@@ -21,9 +21,17 @@
import java.util.StringTokenizer;
+import org.apache.hadoop.conf.Configuration;
+
/** Filesystem disk space usage statistics. Uses the unix 'df' program.
- * Tested on Linux, FreeBSD and Cygwin. */
+ * Tested on Linux, FreeBSD, Cygwin. */
public class DF {
+ public static final long DF_INTERVAL_DEFAULT = 3 * 1000; // default DF refresh interval
+
+ private String dirPath;
+ private long dfInterval; // DF refresh interval in msec
+ private long lastDF; // last time doDF() was performed
+
private String filesystem;
private long capacity;
private long used;
@@ -31,35 +39,31 @@
private int percentUsed;
private String mount;
- public DF(String path) throws IOException {
+ public DF(String path, Configuration conf ) throws IOException {
+ this( path, conf.getLong( "dfs.df.interval", DF.DF_INTERVAL_DEFAULT ));
+ }
- Process process = Runtime.getRuntime().exec(new String[] {"df","-k",path});
+ public DF(String path, long dfInterval) throws IOException {
+ this.dirPath = path;
+ this.dfInterval = dfInterval;
+ lastDF = ( dfInterval < 0 ) ? 0 : -dfInterval;
+ this.doDF();
+ }
+
+ private void doDF() throws IOException {
+ if( lastDF + dfInterval > System.currentTimeMillis() )
+ return;
+ Process process;
+ process = Runtime.getRuntime().exec(getExecString());
try {
- if (process.waitFor() == 0) {
- BufferedReader lines =
- new BufferedReader(new InputStreamReader(process.getInputStream()));
-
- lines.readLine(); // skip headings
-
- StringTokenizer tokens =
- new StringTokenizer(lines.readLine(), " \t\n\r\f%");
-
- this.filesystem = tokens.nextToken();
- if (!tokens.hasMoreTokens()) { // for long filesystem name
- tokens = new StringTokenizer(lines.readLine(), " \t\n\r\f%");
- }
- this.capacity = Long.parseLong(tokens.nextToken()) * 1024;
- this.used = Long.parseLong(tokens.nextToken()) * 1024;
- this.available = Long.parseLong(tokens.nextToken()) * 1024;
- this.percentUsed = Integer.parseInt(tokens.nextToken());
- this.mount = tokens.nextToken();
-
- } else {
+ if (process.waitFor() != 0) {
throw new IOException
- (new BufferedReader(new InputStreamReader(process.getErrorStream()))
- .readLine());
+ (new BufferedReader(new InputStreamReader(process.getErrorStream()))
+ .readLine());
}
+ parseExecResult(
+ new BufferedReader(new InputStreamReader(process.getInputStream())));
} catch (InterruptedException e) {
throw new IOException(e.toString());
} finally {
@@ -69,12 +73,39 @@
/// ACCESSORS
- public String getFilesystem() { return filesystem; }
- public long getCapacity() { return capacity; }
- public long getUsed() { return used; }
- public long getAvailable() { return available; }
- public int getPercentUsed() { return percentUsed; }
- public String getMount() { return mount; }
+ public String getDirPath() {
+ return dirPath;
+ }
+
+ public String getFilesystem() throws IOException {
+ doDF();
+ return filesystem;
+ }
+
+ public long getCapacity() throws IOException {
+ doDF();
+ return capacity;
+ }
+
+ public long getUsed() throws IOException {
+ doDF();
+ return used;
+ }
+
+ public long getAvailable() throws IOException {
+ doDF();
+ return available;
+ }
+
+ public int getPercentUsed() throws IOException {
+ doDF();
+ return percentUsed;
+ }
+
+ public String getMount() throws IOException {
+ doDF();
+ return mount;
+ }
public String toString() {
return
@@ -87,7 +118,33 @@
mount;
}
+ private String[] getExecString() {
+ return new String[] {"df","-k",dirPath};
+ }
+
+ private void parseExecResult( BufferedReader lines ) throws IOException {
+ lines.readLine(); // skip headings
+
+ StringTokenizer tokens =
+ new StringTokenizer(lines.readLine(), " \t\n\r\f%");
+
+ this.filesystem = tokens.nextToken();
+ if (!tokens.hasMoreTokens()) { // for long filesystem name
+ tokens = new StringTokenizer(lines.readLine(), " \t\n\r\f%");
+ }
+ this.capacity = Long.parseLong(tokens.nextToken()) * 1024;
+ this.used = Long.parseLong(tokens.nextToken()) * 1024;
+ this.available = Long.parseLong(tokens.nextToken()) * 1024;
+ this.percentUsed = Integer.parseInt(tokens.nextToken());
+ this.mount = tokens.nextToken();
+ this.lastDF = System.currentTimeMillis();
+ }
+
public static void main(String[] args) throws Exception {
- System.out.println(new DF(args[0]));
+ String path = ".";
+ if( args.length > 0 )
+ path = args[0];
+
+ System.out.println(new DF(path, DF_INTERVAL_DEFAULT).toString());
}
}
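Taken together, the DF.java changes turn every accessor into a cached read:
each one calls doDF(), which re-runs 'df -k' only when the previous run is
older than dfInterval. A short usage sketch against the reworked API (the
path and interval are illustrative):

  import org.apache.hadoop.fs.DF;

  public class DFUsageExample {
    public static void main(String[] args) throws Exception {
      DF df = new DF("/tmp", 3000);   // constructor forks 'df -k' once

      // Within the 3000 ms window these return cached fields;
      // no additional subprocess is forked.
      System.out.println("dir:       " + df.getDirPath());
      System.out.println("capacity:  " + df.getCapacity());
      System.out.println("available: " + df.getAvailable());
      System.out.println("mount:     " + df.getMount());
    }
  }

Note also that, per the doDF() guard, a non-positive interval makes every
accessor re-run 'df', which is handy when fresh numbers are required.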
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java?rev=389916&r1=389915&r2=389916&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/LocalFileSystem.java Wed Mar 29 14:39:23 2006
@@ -360,7 +360,7 @@
f = makeAbsolute(f).getCanonicalFile();
// find highest writable parent dir of f on the same device
- String device = new DF(f.toString()).getMount();
+ String device = new DF(f.toString(), getConf()).getMount();
File parent = f.getParentFile();
File dir;
do {
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java?rev=389916&r1=389915&r2=389916&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java Wed Mar 29 14:39:23 2006
@@ -365,7 +365,7 @@
if (i != iDatanodeClosed) {
try {
if (checkDataDirsEmpty) {
- File dataDir = new File(dataNode.data.dirpath);
+ File dataDir = new File(dataNode.data.diskUsage.getDirPath());
assertNoBlocks(dataDir);
}