Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2012/08/18 01:24:38 UTC

svn commit: r1374474 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src: main/java/org/apache/hadoop/hdfs/protocolPB/ main/java/org/apache/hadoop/hdfs/server/namenode/ main/proto/ test/java/org/apache/hadoop/hdfs/

Author: eli
Date: Fri Aug 17 23:24:36 2012
New Revision: 1374474

URL: http://svn.apache.org/viewvc?rev=1374474&view=rev
Log:
HADOOP-8689. Make trash a server side configuration option. Contributed by Eli Collins

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java

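What this commit does, in short: the NameNode now advertises fs.trash.interval as part of FsServerDefaults, so trash can be enabled purely server side and clients pick the setting up over RPC. A minimal sketch of how a client could inspect the advertised value, assuming a FileSystem bound to a NameNode carrying this change (getServerDefaults() is the existing FileSystem API; getTrashInterval() is the accessor this commit adds):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsServerDefaults;

    public class ShowTrashInterval {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FsServerDefaults d = fs.getServerDefaults();
        // 0 means trash is disabled on the server; a positive value is the
        // checkpoint deletion interval in minutes, as with fs.trash.interval.
        System.out.println("server trash interval: " + d.getTrashInterval());
      }
    }
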
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1374474&r1=1374473&r2=1374474&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Fri Aug 17 23:24:36 2012
@@ -1002,7 +1002,8 @@ public class PBHelper {
         fs.getBlockSize(), fs.getBytesPerChecksum(), 
         fs.getWritePacketSize(), (short) fs.getReplication(),
         fs.getFileBufferSize(),
-        fs.getEncryptDataTransfer());
+        fs.getEncryptDataTransfer(),
+        fs.getTrashInterval());
   }
   
   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -1013,7 +1014,8 @@ public class PBHelper {
       setWritePacketSize(fs.getWritePacketSize())
       .setReplication(fs.getReplication())
       .setFileBufferSize(fs.getFileBufferSize())
-      .setEncryptDataTransfer(fs.getEncryptDataTransfer()).build();
+      .setEncryptDataTransfer(fs.getEncryptDataTransfer())
+      .setTrashInterval(fs.getTrashInterval()).build();
   }
   
   public static FsPermissionProto convert(FsPermission p) {

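Both directions of the protobuf conversion above now carry trashInterval, keeping the Java-side FsServerDefaults and its wire form in sync. An illustrative round trip (a sketch, not code from the commit; the 7-argument FsServerDefaults constructor order follows the convert() call in the hunk):

    // Values here are arbitrary; only the trashInterval plumbing matters.
    FsServerDefaults defaults = new FsServerDefaults(
        128L * 1024 * 1024, // blockSize
        512,                // bytesPerChecksum
        64 * 1024,          // writePacketSize
        (short) 3,          // replication
        4096,               // fileBufferSize
        false,              // encryptDataTransfer
        60);                // trashInterval, the newly serialized field

    FsServerDefaultsProto proto = PBHelper.convert(defaults);
    assert PBHelper.convert(proto).getTrashInterval() == 60;
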
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1374474&r1=1374473&r2=1374474&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Aug 17 23:24:36 2012
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
@@ -104,6 +106,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -464,7 +467,8 @@ public class FSNamesystem implements Nam
           conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
           (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
           conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
-          conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT));
+          conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT),
+          conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT));
       
       this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
                                        DFS_NAMENODE_MAX_OBJECTS_DEFAULT);

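FSNamesystem seeds the trash interval from FS_TRASH_INTERVAL_KEY (fs.trash.interval, default 0, i.e. disabled), so whatever the admin sets in the NameNode's configuration is exactly what getServerDefaults() reports to clients. A sketch, mirroring the MiniDFSCluster setup used by the tests further down:

    // Enable server-side trash for a test cluster. With this patch the value
    // flows into the FsServerDefaults constructed above and is visible to
    // every client via getServerDefaults().
    Configuration serverConf = new HdfsConfiguration();
    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 360); // minutes; 0 disables
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).build();
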
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1374474&r1=1374473&r2=1374474&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Aug 17 23:24:36 2012
@@ -514,9 +514,7 @@ public class NameNode {
   }
   
   private void startTrashEmptier(Configuration conf) throws IOException {
-    long trashInterval = conf.getLong(
-        CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
-        CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
+    long trashInterval = namesystem.getServerDefaults().getTrashInterval();  
     if (trashInterval == 0) {
       return;
     } else if (trashInterval < 0) {

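startTrashEmptier now keys off the same server defaults the NameNode hands to clients, instead of re-reading the raw configuration. The resulting control flow is sketched below; the interval checks come from the hunk, while the emptier startup is paraphrased from the surrounding (unchanged) code and is not part of the diff:

    private void startTrashEmptier(Configuration conf) throws IOException {
      // Server defaults are now the single source of truth for the interval.
      long trashInterval = namesystem.getServerDefaults().getTrashInterval();
      if (trashInterval == 0) {
        return; // trash disabled server side; nothing to empty
      } else if (trashInterval < 0) {
        throw new IOException("Cannot start trash emptier with negative interval");
      }
      // Run Trash's periodic Emptier as a daemon thread (paraphrased).
      Thread emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
      emptier.setDaemon(true);
      emptier.start();
    }
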
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1374474&r1=1374473&r2=1374474&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Fri Aug 17 23:24:36 2012
@@ -188,6 +188,7 @@ message FsServerDefaultsProto {
   required uint32 replication = 4; // Actually a short - only 16 bits used
   required uint32 fileBufferSize = 5;
   optional bool encryptDataTransfer = 6 [default = false];
+  optional uint64 trashInterval = 7 [default = 0];
 }
 
 

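Because trashInterval is an optional field with an explicit default, the RPC stays wire compatible in both directions: an old NameNode never sets field 7, and a new reader falls back to 0 (trash off). A compatibility sketch, assuming standard protobuf 2.x generated Java for this file (HdfsProtos.FsServerDefaultsProto) and the usual field numbers 1-3 for the fields not shown in the hunk:

    // A message as an old server would serialize it: field 7 absent.
    FsServerDefaultsProto fromOldServer = FsServerDefaultsProto.newBuilder()
        .setBlockSize(128L * 1024 * 1024)
        .setBytesPerChecksum(512)
        .setWritePacketSize(64 * 1024)
        .setReplication(3)
        .setFileBufferSize(4096)
        .build();

    assert !fromOldServer.hasTrashInterval();
    assert fromOldServer.getTrashInterval() == 0; // [default = 0] applies
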
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java?rev=1374474&r1=1374473&r2=1374474&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java Fri Aug 17 23:24:36 2012
@@ -23,12 +23,18 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
+import org.apache.hadoop.fs.Trash;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
- * This class tests commands from Trash.
+ * Test trash using HDFS
  */
 public class TestHDFSTrash {
   private static MiniDFSCluster cluster = null;
@@ -44,9 +50,6 @@ public class TestHDFSTrash {
     if (cluster != null) { cluster.shutdown(); }
   }
 
-  /**
-   * Tests Trash on HDFS
-   */
   @Test
   public void testTrash() throws IOException {
     TestTrash.trashShell(cluster.getFileSystem(), new Path("/"));
@@ -60,4 +63,52 @@ public class TestHDFSTrash {
     TestTrash.trashNonDefaultFS(conf);
   }
 
+  /** Clients should always use trash if enabled server side */
+  @Test
+  public void testTrashEnabledServerSide() throws IOException {
+    Configuration serverConf = new HdfsConfiguration();
+    Configuration clientConf = new Configuration();
+
+    // Enable trash on the server and client
+    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+
+    MiniDFSCluster cluster2 = null;
+    try {
+      cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
+      FileSystem fs = cluster2.getFileSystem();
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+
+      // Disabling trash on the client is ignored
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+    } finally {
+      if (cluster2 != null) cluster2.shutdown();
+    }
+  }
+
+  /** Clients should always use trash if enabled client side */
+  @Test
+  public void testTrashEnabledClientSide() throws IOException {
+    Configuration serverConf = new HdfsConfiguration();
+    Configuration clientConf = new Configuration();
+    
+    // Disable server side
+    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
+
+    MiniDFSCluster cluster2 = null;
+    try {
+      cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
+
+      // Client side is disabled by default
+      FileSystem fs = cluster2.getFileSystem();
+      assertFalse(new Trash(fs, clientConf).isEnabled());
+
+      // Enabling on the client works even though it's disabled on the server
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+    } finally {
+      if (cluster2 != null) cluster2.shutdown();
+    }
+  }
 }
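
Taken together, the two new tests pin down the precedence rule: a non-zero server interval always wins, and only when the server reports 0 does the client's own fs.trash.interval apply, so trash is effectively on if either side enables it. A hypothetical helper (not part of the commit) expressing that rule:

    static long effectiveTrashInterval(FileSystem fs, Configuration clientConf)
        throws IOException {
      long serverInterval = fs.getServerDefaults().getTrashInterval();
      return serverInterval != 0
          ? serverInterval // server setting takes precedence when enabled
          : clientConf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
    }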