Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2009/09/08 20:03:55 UTC

svn commit: r812601 - in /hadoop/hdfs/trunk: ./ lib/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: suresh
Date: Tue Sep  8 18:03:54 2009
New Revision: 812601

URL: http://svn.apache.org/viewvc?rev=812601&view=rev
Log:
HDFS-578. Add support for new FileSystem method for clients to get server defaults. Contributed by Kan Zhang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/lib/hadoop-core-0.21.0-dev.jar
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Sep  8 18:03:54 2009
@@ -22,6 +22,9 @@
     HDFS-492. Add two JSON JSP pages to the Namenode for providing corrupt
     blocks/replicas information.  (Bill Zeller via szetszwo)
 
+    HDFS-578. Add support for new FileSystem method for clients to get server
+    defaults. (Kan Zhang via suresh)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

Modified: hadoop/hdfs/trunk/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/lib/hadoop-core-0.21.0-dev.jar?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Tue Sep  8 18:03:54 2009
@@ -67,6 +67,7 @@
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
@@ -125,12 +126,15 @@
  ********************************************************/
 public class DFSClient implements FSConstants, java.io.Closeable {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
+  public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   private final ClientProtocol namenode;
   private final ClientProtocol rpcNamenode;
   final UnixUserGroupInformation ugi;
   volatile boolean clientRunning = true;
+  private volatile FsServerDefaults serverDefaults;
+  private volatile long serverDefaultsLastUpdate;
   Random r = new Random();
   final String clientName;
   final LeaseChecker leasechecker = new LeaseChecker();
@@ -329,6 +333,18 @@
   }
 
   /**
+   * Get server default values for a number of configuration params.
+   */
+  public FsServerDefaults getServerDefaults() throws IOException {
+    long now = System.currentTimeMillis();
+    if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
+      serverDefaults = namenode.getServerDefaults();
+      serverDefaultsLastUpdate = now;
+    }
+    return serverDefaults;
+  }
+
+  /**
    * Report corrupt blocks that were discovered by the client.
    */
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {

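A note on the DFSClient change above: the client memoizes the namenode's reply and refetches at most once per SERVER_DEFAULTS_VALIDITY_PERIOD (one hour), so repeated calls do not turn into repeated RPCs; because serverDefaultsLastUpdate starts at zero, the first call always goes to the namenode. A minimal standalone sketch of the same time-based caching pattern (the DefaultsSource interface and all names below are illustrative, not part of this patch):

    import java.io.IOException;

    // Illustrative stand-in for namenode.getServerDefaults(); not part of this patch.
    interface DefaultsSource {
      Object fetchDefaults() throws IOException;
    }

    class TimedCache {
      private static final long VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour, as in DFSClient
      private final DefaultsSource source;
      private volatile Object cached;     // last value fetched from the server
      private volatile long lastUpdate;   // wall-clock time of the last fetch

      TimedCache(DefaultsSource source) {
        this.source = source;
      }

      // Refetch only when the cached copy is older than the validity period.
      // As in DFSClient, two concurrent callers may both refetch; that is
      // harmless here because the fetched value is the same either way.
      Object get() throws IOException {
        long now = System.currentTimeMillis();
        if (now - lastUpdate > VALIDITY_PERIOD) {
          cached = source.fetchDefaults(); // at most roughly one fetch per period
          lastUpdate = now;
        }
        return cached;
      }
    }
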
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Tue Sep  8 18:03:54 2009
@@ -434,6 +434,12 @@
     dfs.metaSave(pathname);
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    return dfs.getServerDefaults();
+  }
+
   /**
    * We need to find the blocks that didn't match.  Likely only one 
    * is corrupt but we will report both to the namenode.  In the future,

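With the DistributedFileSystem override in place, applications can retrieve the namenode's defaults through the generic FileSystem API. A hedged usage sketch (the class name is illustrative, and it assumes a configuration whose default filesystem points at a running HDFS namenode); the getters match those exercised in the TestFileCreation change below:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsServerDefaults;

    public class ShowServerDefaults {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();  // reads the usual site config files
        FileSystem fs = FileSystem.get(conf);      // a DistributedFileSystem for hdfs:// URIs
        FsServerDefaults d = fs.getServerDefaults();
        System.out.println("block size:         " + d.getBlockSize());
        System.out.println("bytes per checksum: " + d.getBytesPerChecksum());
        System.out.println("write packet size:  " + d.getWritePacketSize());
        System.out.println("replication:        " + d.getReplication());
        System.out.println("file buffer size:   " + d.getFileBufferSize());
        fs.close();
      }
    }
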
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Sep  8 18:03:54 2009
@@ -23,6 +23,7 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -43,9 +44,9 @@
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 45: add create flag for create command, see Hadoop-5438
+   * 46: added a new method getServerDefaults(), see HDFS-578
    */
-  public static final long versionID = 45L;
+  public static final long versionID = 46L;
   
   ///////////////////////////////////////
   // File contents
@@ -74,6 +75,13 @@
                                           long length) throws IOException;
 
   /**
+   * Get server default values for a number of configuration params.
+   * @return a set of server default configuration values
+   * @throws IOException
+   */
+  public FsServerDefaults getServerDefaults() throws IOException;
+
+  /**
    * Create a new file entry in the namespace.
    * <p>
    * This will create an empty file specified by the source path.

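The versionID bump from 45 to 46 above means a client compiled against the old ClientProtocol is rejected when it connects to a new namenode (and vice versa) rather than failing on an unknown method mid-session. A rough sketch of the kind of check Hadoop's RPC layer performs at connection setup (illustrative only, not the actual org.apache.hadoop.ipc code):

    // Illustrative only; the real check lives in Hadoop's RPC layer.
    long clientVersion = ClientProtocol.versionID;  // 46 after this patch
    long serverVersion = 46L;                       // as reported by the server side
    if (serverVersion != clientVersion) {
      throw new IOException("Protocol version mismatch: client=" + clientVersion
          + ", server=" + serverVersion);
    }
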
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Tue Sep  8 18:03:54 2009
@@ -53,6 +53,10 @@
   public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
   //TODO mb@media-style.com: should be conf injected?
   public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
+  public static final int DEFAULT_BYTES_PER_CHECKSUM = 512;
+  public static final int DEFAULT_WRITE_PACKET_SIZE = 64 * 1024;
+  public static final short DEFAULT_REPLICATION_FACTOR = 3;
+  public static final int DEFAULT_FILE_BUFFER_SIZE = 4096;
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
 
   public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Sep  8 18:03:54 2009
@@ -55,6 +55,7 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.ipc.Server;
@@ -200,8 +201,7 @@
   private long heartbeatExpireInterval;
   //replicationRecheckInterval is how often namenode checks for new replication work
   private long replicationRecheckInterval;
-  // default block size of a file
-  private long defaultBlockSize = 0;
+  private FsServerDefaults serverDefaults;
   // allow appending to hdfs files
   private boolean supportAppends = true;
 
@@ -413,7 +413,12 @@
       10 * heartbeatInterval;
     this.replicationRecheckInterval = 
       conf.getInt("dfs.replication.interval", 3) * 1000L;
-    this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    this.serverDefaults = new FsServerDefaults(
+        conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE),
+        conf.getInt("io.bytes.per.checksum", DEFAULT_BYTES_PER_CHECKSUM),
+        conf.getInt("dfs.write.packet.size", DEFAULT_WRITE_PACKET_SIZE),
+        (short) conf.getInt("dfs.replication", DEFAULT_REPLICATION_FACTOR),
+        conf.getInt("io.file.buffer.size", DEFAULT_FILE_BUFFER_SIZE));
     this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
     this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
                                          20*(int)(heartbeatInterval/1000));
@@ -505,7 +510,11 @@
   }
 
   long getDefaultBlockSize() {
-    return defaultBlockSize;
+    return serverDefaults.getBlockSize();
+  }
+
+  FsServerDefaults getServerDefaults() {
+    return serverDefaults;
   }
 
   long getAccessTimePrecision() {

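With FSNamesystem now publishing its configured values as a single FsServerDefaults object, a client can honor the server's settings instead of its own local configuration when creating files. Continuing from the fs handle in the earlier sketch, a hedged example (the path is illustrative; FileSystem.create offers an overload taking buffer size, replication, and block size):

    // Create a file using the server's preferred parameters rather than the
    // client's local defaults.
    Path p = new Path("/tmp/example.dat");  // illustrative path
    FsServerDefaults d = fs.getServerDefaults();
    FSDataOutputStream out = fs.create(
        p,
        true,                    // overwrite if the file exists
        d.getFileBufferSize(),   // io.file.buffer.size as configured on the namenode
        d.getReplication(),      // dfs.replication as configured on the namenode
        d.getBlockSize());       // dfs.block.size as configured on the namenode
    out.close();
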
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Sep  8 18:03:54 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -546,6 +547,11 @@
   }
 
   /** {@inheritDoc} */
+  public FsServerDefaults getServerDefaults() throws IOException {
+    return namesystem.getServerDefaults();
+  }
+
+  /** {@inheritDoc} */
   public void create(String src, 
                      FsPermission masked,
                              String clientName, 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Tue Sep  8 18:03:54 2009
@@ -155,6 +155,8 @@
     
     public LocatedBlocks  getBlockLocations(String src, long offset, long length) throws IOException { return null; }
     
+    public FsServerDefaults getServerDefaults() throws IOException { return null; }
+    
     public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, short replication, long blockSize) throws IOException {}
     
     public LocatedBlock append(String src, String clientName) throws IOException { return null; }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=812601&r1=812600&r2=812601&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Tue Sep  8 18:03:54 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -45,6 +46,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.log4j.Level;
 
 
@@ -171,6 +173,33 @@
   }
 
   /**
+   * Test that server default values can be retrieved on the client side
+   */
+  public void testServerDefaults() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", FSConstants.DEFAULT_BLOCK_SIZE);
+    conf.setInt("io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
+    conf.setInt("dfs.write.packet.size", FSConstants.DEFAULT_WRITE_PACKET_SIZE);
+    conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
+    conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf,
+        FSConstants.DEFAULT_REPLICATION_FACTOR + 1, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      FsServerDefaults serverDefaults = fs.getServerDefaults();
+      assertEquals(FSConstants.DEFAULT_BLOCK_SIZE, serverDefaults.getBlockSize());
+      assertEquals(FSConstants.DEFAULT_BYTES_PER_CHECKSUM, serverDefaults.getBytesPerChecksum());
+      assertEquals(FSConstants.DEFAULT_WRITE_PACKET_SIZE, serverDefaults.getWritePacketSize());
+      assertEquals(FSConstants.DEFAULT_REPLICATION_FACTOR + 1, serverDefaults.getReplication());
+      assertEquals(FSConstants.DEFAULT_FILE_BUFFER_SIZE, serverDefaults.getFileBufferSize());
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
    * Test that file data becomes available before file is closed.
    */
   public void testFileCreation() throws IOException {