Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/12/02 18:41:48 UTC

svn commit: r1547122 [4/5] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/dev-support/ h...

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java Mon Dec  2 17:41:44 2013
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.web;
 
 import java.io.IOException;
-import java.security.GeneralSecurityException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -61,18 +60,8 @@ public class HsftpFileSystem extends Hft
   }
 
   @Override
-  protected void initConnectionFactoryAndTokenAspect(Configuration conf) throws IOException {
+  protected void initTokenAspect(Configuration conf) throws IOException {
     tokenAspect = new TokenAspect<HftpFileSystem>(this, TOKEN_KIND);
-
-    connectionFactory = new URLConnectionFactory(
-        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
-    try {
-      connectionFactory.setConnConfigurator(URLConnectionFactory
-          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
-              conf));
-    } catch (GeneralSecurityException e) {
-      throw new IOException(e);
-    }
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java Mon Dec  2 17:41:44 2013
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Text;
 
@@ -45,20 +41,6 @@ public class SWebHdfsFileSystem extends 
   }
 
   @Override
-  protected void initializeConnectionFactory(Configuration conf)
-      throws IOException {
-    connectionFactory = new URLConnectionFactory(
-        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
-    try {
-      connectionFactory.setConnConfigurator(URLConnectionFactory
-          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
-              conf));
-    } catch (GeneralSecurityException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
   protected int getDefaultPort() {
     return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java Mon Dec  2 17:41:44 2013
@@ -39,6 +39,8 @@ import org.apache.hadoop.security.authen
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Utilities for handling URLs
  */
@@ -54,26 +56,50 @@ public class URLConnectionFactory {
    * Timeout for socket connects and reads
    */
   public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+  private final ConnectionConfigurator connConfigurator;
 
-  public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory(
-      DEFAULT_SOCKET_TIMEOUT);
-
-  private int socketTimeout;
-
-  /** Configure connections for AuthenticatedURL */
-  private ConnectionConfigurator connConfigurator = new ConnectionConfigurator() {
+  private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() {
     @Override
     public HttpURLConnection configure(HttpURLConnection conn)
         throws IOException {
-      URLConnectionFactory.setTimeouts(conn, socketTimeout);
+      URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
       return conn;
     }
   };
 
   /**
+   * The URLConnectionFactory that sets the default timeout and only trusts
+   * Java's SSL certificates.
+   */
+  public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory(
+      DEFAULT_TIMEOUT_CONN_CONFIGURATOR);
+
+  /**
+   * Construct a new URLConnectionFactory based on the configuration. It will
+   * try to load SSL certificates when they are specified.
+   */
+  public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) {
+    ConnectionConfigurator conn = null;
+    try {
+      conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
+    } catch (Exception e) {
+      LOG.debug(
+          "Cannot load customized ssl related configuration. Fallback to system-generic settings.",
+          e);
+      conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
+    }
+    return new URLConnectionFactory(conn);
+  }
+
+  @VisibleForTesting
+  URLConnectionFactory(ConnectionConfigurator connConfigurator) {
+    this.connConfigurator = connConfigurator;
+  }
+
+  /**
    * Create a new ConnectionConfigurator for SSL connections
    */
-  static ConnectionConfigurator newSslConnConfigurator(final int timeout,
+  private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
       Configuration conf) throws IOException, GeneralSecurityException {
     final SSLFactory factory;
     final SSLSocketFactory sf;
@@ -99,10 +125,6 @@ public class URLConnectionFactory {
     };
   }
 
-  public URLConnectionFactory(int socketTimeout) {
-    this.socketTimeout = socketTimeout;
-  }
-
   /**
    * Opens a url with read and connect timeouts
    *
@@ -153,14 +175,6 @@ public class URLConnectionFactory {
     }
   }
 
-  public ConnectionConfigurator getConnConfigurator() {
-    return connConfigurator;
-  }
-
-  public void setConnConfigurator(ConnectionConfigurator connConfigurator) {
-    this.connConfigurator = connConfigurator;
-  }
-
   /**
    * Sets timeout parameters on the given URLConnection.
    * 
@@ -169,7 +183,7 @@ public class URLConnectionFactory {
    * @param socketTimeout
    *          the connection and read timeout of the connection.
    */
-  static void setTimeouts(URLConnection connection, int socketTimeout) {
+  private static void setTimeouts(URLConnection connection, int socketTimeout) {
     connection.setConnectTimeout(socketTimeout);
     connection.setReadTimeout(socketTimeout);
   }
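
For context, a minimal caller sketch of the reworked factory API, based only on the
methods visible in this hunk and on the two-argument openConnection(URL, boolean)
exercised by the mocked test further down. The sketch is not part of this commit; the
host, port, and path are illustrative, and the boolean is assumed to toggle SPNEGO
authentication.

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;

public class UrlConnectionFactoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Tries to build an SSL-aware configurator from the configuration;
    // on failure it falls back to the plain default-timeout configurator.
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    URL url = new URL("http://namenode.example.com:50070/jmx");
    // Second argument assumed to select SPNEGO-authenticated connections.
    HttpURLConnection conn =
        (HttpURLConnection) factory.openConnection(url, false);
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}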

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Dec  2 17:41:44 2013
@@ -112,7 +112,7 @@ public class WebHdfsFileSystem extends F
   public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;
 
   /** Default connection factory may be overridden in tests to use smaller timeout values */
-  URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  protected URLConnectionFactory connectionFactory;
 
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
@@ -152,22 +152,15 @@ public class WebHdfsFileSystem extends F
     tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
   }
 
-  /**
-   * Initialize connectionFactory. This function is intended to
-   * be overridden by SWebHdfsFileSystem.
-   */
-  protected void initializeConnectionFactory(Configuration conf)
-      throws IOException {
-    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
-  }
-
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
     initializeTokenAspect();
-    initializeConnectionFactory(conf);
+
 
     ugi = UserGroupInformation.getCurrentUser();
 

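Since connectionFactory is now a protected field populated during initialize(), and the
constructor taking a ConnectionConfigurator is package-private and @VisibleForTesting, a
test in org.apache.hadoop.hdfs.web could still swap in a fail-fast factory after
initialization. A rough sketch under those assumptions; the 5-second timeout and the
class name are made up:

package org.apache.hadoop.hdfs.web;

import java.io.IOException;
import java.net.HttpURLConnection;

import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;

class ShortTimeoutConnectionFactory {
  /** Builds a factory whose connections time out quickly, for tests. */
  static URLConnectionFactory create() {
    return new URLConnectionFactory(new ConnectionConfigurator() {
      @Override
      public HttpURLConnection configure(HttpURLConnection conn)
          throws IOException {
        conn.setConnectTimeout(5000); // fail fast instead of waiting a minute
        conn.setReadTimeout(5000);
        return conn;
      }
    });
  }
}

A test could then assign webhdfs.connectionFactory = ShortTimeoutConnectionFactory.create()
after initialize() returns, which is the kind of override the "may be overridden in tests"
comment on the field refers to.
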
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Mon Dec  2 17:41:44 2013
@@ -369,12 +369,19 @@ message CacheDirectiveInfoProto {
   optional string path = 2;
   optional uint32 replication = 3;
   optional string pool = 4;
+  optional CacheDirectiveInfoExpirationProto expiration = 5;
+}
+
+message CacheDirectiveInfoExpirationProto {
+  required int64 millis = 1;
+  required bool isRelative = 2;
 }
 
 message CacheDirectiveStatsProto {
   required int64 bytesNeeded = 1;
   required int64 bytesCached = 2;
   required int64 filesAffected = 3;
+  required bool hasExpired = 4;
 }
 
 message AddCacheDirectiveRequestProto {
@@ -422,6 +429,12 @@ message CachePoolInfoProto {
   optional int32 weight = 5;
 }
 
+message CachePoolStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 filesAffected = 3;
+}
+
 message AddCachePoolRequestProto {
   required CachePoolInfoProto info = 1;
 }
@@ -448,12 +461,13 @@ message ListCachePoolsRequestProto {
 }
 
 message ListCachePoolsResponseProto {
-  repeated ListCachePoolsResponseElementProto elements = 1;
+  repeated CachePoolEntryProto entries = 1;
   required bool hasMore = 2;
 }
 
-message ListCachePoolsResponseElementProto {
+message CachePoolEntryProto {
   required CachePoolInfoProto info = 1;
+  required CachePoolStatsProto stats = 2;
 }
 
 message GetFileLinkInfoRequestProto {

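To illustrate the new messages, here is how the generated protobuf builders for the
expiration field might be used. This assumes the usual outer class for this proto file
(ClientNamenodeProtocolProtos) and standard protobuf-java builder naming; the path, pool,
and replication values are illustrative.

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;

public class ExpirationProtoExample {
  public static void main(String[] args) {
    // A relative expiration of one hour: millis carries the offset and
    // isRelative distinguishes it from an absolute wall-clock time.
    CacheDirectiveInfoExpirationProto expiration =
        CacheDirectiveInfoExpirationProto.newBuilder()
            .setMillis(60L * 60 * 1000)
            .setIsRelative(true)
            .build();

    CacheDirectiveInfoProto directive = CacheDirectiveInfoProto.newBuilder()
        .setPath("/warm-data")
        .setPool("analytics")
        .setReplication(3)
        .setExpiration(expiration)
        .build();

    System.out.println(directive);
  }
}
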
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Mon Dec  2 17:41:44 2013
@@ -53,11 +53,12 @@ HDFS NFS Gateway
    * If the client mounts the export with access time update allowed, make sure the following 
     property is not disabled in the configuration file. Only NameNode needs to restart after 
     this property is changed. On some Unix systems, the user can disable access time update
-    by mounting the export with "noatime".
+    by mounting the export with "noatime". If the export is mounted with "noatime", the user
+    does not need to change the following property and does not need to restart the NameNode.
 
 ----
 <property>
-  <name>dfs.access.time.precision</name>
+  <name>dfs.namenode.accesstime.precision</name>
   <value>3600000</value>
   <description>The access time for HDFS file is precise upto this value. 
     The default value is 1 hour. Setting a value of 0 disables

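As a quick illustration of the renamed key, a client-side check could read the effective
value as below. The key name and the one-hour default are taken from the documentation
above; the class itself is hypothetical and assumes hdfs-site.xml is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class AccessTimePrecisionCheck {
  public static void main(String[] args) {
    // HdfsConfiguration loads hdfs-default.xml and hdfs-site.xml if present.
    Configuration conf = new HdfsConfiguration();
    long precisionMs =
        conf.getLong("dfs.namenode.accesstime.precision", 3600000L);
    if (precisionMs == 0) {
      System.out.println("Access time updates are disabled.");
    } else {
      System.out.println("Access times are updated at most once per "
          + precisionMs + " ms.");
    }
  }
}
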
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Mon Dec  2 17:41:44 2013
@@ -191,21 +191,25 @@ public class TestDFSRollback {
       // Create a previous snapshot for the blockpool
       UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
-      // Older LayoutVersion to make it rollback
+      // Put newer layout version in current.
       storageInfo = new StorageInfo(
-          UpgradeUtilities.getCurrentLayoutVersion()+1,
+          UpgradeUtilities.getCurrentLayoutVersion()-1,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
           UpgradeUtilities.getCurrentClusterID(cluster),
           UpgradeUtilities.getCurrentFsscTime(cluster));
-      // Create old VERSION file for each data dir
+
+      // Overwrite VERSION file in the current directory of
+      // volume directories and block pool slice directories
+      // with a layout version from future.
+      File[] dataCurrentDirs = new File[dataNodeDirs.length];
       for (int i=0; i<dataNodeDirs.length; i++) {
-        Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
-            + UpgradeUtilities.getCurrentBlockPoolID(cluster));
-        UpgradeUtilities.createBlockPoolVersionFile(
-            new File(bpPrevPath.toString()),
-            storageInfo,
-            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+        dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i] 
+            + "/current")).toString());
       }
+      UpgradeUtilities.createDataNodeVersionFile(
+          dataCurrentDirs,
+          storageInfo,
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
 
       cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       assertTrue(cluster.isDataNodeUp());

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Mon Dec  2 17:41:44 2013
@@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -62,6 +63,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assume;
 import org.junit.Before;
@@ -724,4 +726,43 @@ public class TestDFSUtil {
         DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
         DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
   }
+
+  @Test(timeout=1000)
+  public void testDurationToString() throws Exception {
+    assertEquals("000:00:00:00", DFSUtil.durationToString(0));
+    try {
+      DFSUtil.durationToString(-199);
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Invalid negative duration", e);
+    }
+    assertEquals("001:01:01:01",
+        DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
+    assertEquals("000:23:59:59",
+        DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000));
+  }
+
+  @Test(timeout=5000)
+  public void testRelativeTimeConversion() throws Exception {
+    try {
+      DFSUtil.parseRelativeTime("1");
+    } catch (IOException e) {
+      assertExceptionContains("too short", e);
+    }
+    try {
+      DFSUtil.parseRelativeTime("1z");
+    } catch (IOException e) {
+      assertExceptionContains("unknown time unit", e);
+    }
+    try {
+      DFSUtil.parseRelativeTime("yyz");
+    } catch (IOException e) {
+      assertExceptionContains("is not a number", e);
+    }
+    assertEquals(61*1000, DFSUtil.parseRelativeTime("61s"));
+    assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
+    assertEquals(0, DFSUtil.parseRelativeTime("0s"));
+    assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
+    assertEquals(4*24*60*60*1000, DFSUtil.parseRelativeTime("4d"));
+    assertEquals(999L*24*60*60*1000, DFSUtil.parseRelativeTime("999d"));
+  }
 }
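
The two helpers exercised above are new in this change set. A minimal re-implementation
sketch that is consistent with these assertions (days:hours:minutes:seconds formatting,
and s/m/h/d units) looks roughly like the following; the real DFSUtil methods may differ
in detail, so treat this as an illustration of the expected behavior only.

import java.io.IOException;

public class DurationUtilSketch {
  /** Formats a millisecond duration as DDD:HH:MM:SS, e.g. 0 -> "000:00:00:00". */
  public static String durationToString(long durationMs) {
    if (durationMs < 0) {
      throw new IllegalArgumentException("Invalid negative duration: " + durationMs);
    }
    long secs = durationMs / 1000;
    long days = secs / (24 * 60 * 60);
    long hours = (secs / (60 * 60)) % 24;
    long minutes = (secs / 60) % 60;
    long seconds = secs % 60;
    return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds);
  }

  /** Parses strings like "61s", "25h", or "4d" into milliseconds. */
  public static long parseRelativeTime(String relTime) throws IOException {
    if (relTime.length() < 2) {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": too short");
    }
    String value = relTime.substring(0, relTime.length() - 1);
    final long number;
    try {
      number = Long.parseLong(value);
    } catch (NumberFormatException e) {
      throw new IOException(value + " is not a number");
    }
    char unit = relTime.charAt(relTime.length() - 1);
    switch (unit) {
      case 's': return number * 1000;
      case 'm': return number * 60 * 1000;
      case 'h': return number * 60 * 60 * 1000;
      case 'd': return number * 24 * 60 * 60 * 1000;
      default:
        throw new IOException("Unable to parse relative time value of " + relTime
            + ": unknown time unit " + unit);
    }
  }
}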

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Mon Dec  2 17:41:44 2013
@@ -368,10 +368,7 @@ public class TestQuota {
     // be identical.
     conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    final FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-                fs instanceof DistributedFileSystem);
-    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    final DistributedFileSystem dfs = cluster.getFileSystem();
     
     try {
       // 1: create directory /nqdir0/qdir1/qdir20/nqdir30

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java Mon Dec  2 17:41:44 2013
@@ -91,7 +91,6 @@ public class TestIPCLoggerChannel {
    */
   @Test
   public void testQueueLimiting() throws Exception {
-    
     // Block the underlying fake proxy from actually completing any calls.
     DelayAnswer delayer = new DelayAnswer(LOG);
     Mockito.doAnswer(delayer).when(mockProxy).journal(

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java Mon Dec  2 17:41:44 2013
@@ -25,6 +25,8 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.List;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
@@ -124,7 +126,7 @@ public class TestQuorumJournalManagerUni
       .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
     qjm.startLogSegment(1);
   }
-  
+
   @Test
   public void testQuorumOfLoggersFail() throws Exception {
     futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
@@ -141,6 +143,16 @@ public class TestQuorumJournalManagerUni
   }
   
   @Test
+  public void testQuorumOutputStreamReport() throws Exception {
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
+    QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1);
+    String report = os.generateReport();
+    Assert.assertFalse("Report should be plain text", report.contains("<"));
+  }
+
+  @Test
   public void testWriteEdits() throws Exception {
     EditLogOutputStream stm = createLogSegment();
     writeOp(stm, 1);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Mon Dec  2 17:41:44 2013
@@ -532,6 +532,11 @@ public class SimulatedFSDataset implemen
     return 0l;
   }
 
+  @Override // FSDatasetMBean
+  public long getNumBlocksCached() {
+    return 0l;
+  }
+
   @Override
   public long getNumBlocksFailedToCache() {
     return 0l;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Mon Dec  2 17:41:44 2013
@@ -49,8 +49,8 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -72,6 +72,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 
 import com.google.common.base.Supplier;
 
@@ -95,6 +97,7 @@ public class TestFsDatasetCache {
 
   static {
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+    LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG);
   }
 
   @Before
@@ -110,6 +113,9 @@ public class TestFsDatasetCache {
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
 
+    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
+
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(1).build();
     cluster.waitActive();
@@ -122,8 +128,6 @@ public class TestFsDatasetCache {
 
     spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
 
-    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
-    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
   }
 
   @After
@@ -201,17 +205,21 @@ public class TestFsDatasetCache {
   /**
    * Blocks until cache usage hits the expected new value.
    */
-  private long verifyExpectedCacheUsage(final long expected) throws Exception {
+  private long verifyExpectedCacheUsage(final long expectedCacheUsed,
+      final long expectedBlocks) throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       private int tries = 0;
       
       @Override
       public Boolean get() {
-        long curDnCacheUsed = fsd.getCacheUsed();
-        if (curDnCacheUsed != expected) {
+        long curCacheUsed = fsd.getCacheUsed();
+        long curBlocks = fsd.getNumBlocksCached();
+        if ((curCacheUsed != expectedCacheUsed) ||
+            (curBlocks != expectedBlocks)) {
           if (tries++ > 10) {
-            LOG.info("verifyExpectedCacheUsage: expected " +
-                expected + ", got " + curDnCacheUsed + "; " +
+            LOG.info("verifyExpectedCacheUsage: have " +
+                curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
+                curBlocks + "/" + expectedBlocks + " blocks cached. " +
                 "memlock limit = " +
                 NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
                 ".  Waiting...");
@@ -221,14 +229,15 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 100, 60000);
-    return expected;
+    return expectedCacheUsed;
   }
 
   private void testCacheAndUncacheBlock() throws Exception {
     LOG.info("beginning testCacheAndUncacheBlock");
     final int NUM_BLOCKS = 5;
 
-    verifyExpectedCacheUsage(0);
+    verifyExpectedCacheUsage(0, 0);
+    assertEquals(0, fsd.getNumBlocksCached());
 
     // Write a test file
     final Path testFile = new Path("/testCacheBlock");
@@ -255,7 +264,7 @@ public class TestFsDatasetCache {
     // Cache each block in succession, checking each time
     for (int i=0; i<NUM_BLOCKS; i++) {
       setHeartbeatResponse(cacheBlock(locs[i]));
-      current = verifyExpectedCacheUsage(current + blockSizes[i]);
+      current = verifyExpectedCacheUsage(current + blockSizes[i], i + 1);
       dnMetrics = getMetrics(dn.getMetrics().name());
       long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
       assertTrue("Expected more cache requests from the NN ("
@@ -267,7 +276,8 @@ public class TestFsDatasetCache {
     // Uncache each block in succession, again checking each time
     for (int i=0; i<NUM_BLOCKS; i++) {
       setHeartbeatResponse(uncacheBlock(locs[i]));
-      current = verifyExpectedCacheUsage(current - blockSizes[i]);
+      current = verifyExpectedCacheUsage(current - blockSizes[i],
+          NUM_BLOCKS - 1 - i);
       dnMetrics = getMetrics(dn.getMetrics().name());
       long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
       assertTrue("Expected more uncache requests from the NN",
@@ -334,10 +344,11 @@ public class TestFsDatasetCache {
 
     // Cache the first n-1 files
     long total = 0;
-    verifyExpectedCacheUsage(0);
+    verifyExpectedCacheUsage(0, 0);
     for (int i=0; i<numFiles-1; i++) {
       setHeartbeatResponse(cacheBlocks(fileLocs[i]));
-      total = verifyExpectedCacheUsage(rounder.round(total + fileSizes[i]));
+      total = verifyExpectedCacheUsage(
+          rounder.round(total + fileSizes[i]), 4 * (i + 1));
     }
 
     // nth file should hit a capacity exception
@@ -363,7 +374,7 @@ public class TestFsDatasetCache {
     for (int i=0; i<numFiles-1; i++) {
       setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
       total -= rounder.round(fileSizes[i]);
-      verifyExpectedCacheUsage(total);
+      verifyExpectedCacheUsage(total, 4 * (numFiles - 2 - i));
     }
     LOG.info("finishing testFilesExceedMaxLockedMemory");
   }
@@ -373,7 +384,7 @@ public class TestFsDatasetCache {
     LOG.info("beginning testUncachingBlocksBeforeCachingFinishes");
     final int NUM_BLOCKS = 5;
 
-    verifyExpectedCacheUsage(0);
+    verifyExpectedCacheUsage(0, 0);
 
     // Write a test file
     final Path testFile = new Path("/testCacheBlock");
@@ -409,7 +420,7 @@ public class TestFsDatasetCache {
     // should increase, even though caching doesn't complete on any of them.
     for (int i=0; i<NUM_BLOCKS; i++) {
       setHeartbeatResponse(cacheBlock(locs[i]));
-      current = verifyExpectedCacheUsage(current + blockSizes[i]);
+      current = verifyExpectedCacheUsage(current + blockSizes[i], i + 1);
     }
     
     setHeartbeatResponse(new DatanodeCommand[] {
@@ -417,7 +428,7 @@ public class TestFsDatasetCache {
     });
 
     // wait until all caching jobs are finished cancelling.
-    current = verifyExpectedCacheUsage(0);
+    current = verifyExpectedCacheUsage(0, 0);
     LOG.info("finishing testUncachingBlocksBeforeCachingFinishes");
   }
 

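The reworked verifyExpectedCacheUsage above now waits on two metrics at once. The
underlying polling idiom, extracted into a standalone sketch with a hypothetical
CacheStats stand-in for the dataset metrics object, is simply:

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  interface CacheStats {            // placeholder for the fsd metrics getters
    long getCacheUsed();
    long getNumBlocksCached();
  }

  static void waitUntilCached(final CacheStats stats, final long expectedBytes,
      final long expectedBlocks) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // Keep re-checking both metrics until they match, or waitFor times out.
        return stats.getCacheUsed() == expectedBytes
            && stats.getNumBlocksCached() == expectedBlocks;
      }
    }, 100, 60000); // poll every 100 ms, give up after 60 s
  }
}
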
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Mon Dec  2 17:41:44 2013
@@ -33,10 +33,12 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -54,12 +56,13 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
@@ -263,8 +266,8 @@ public class TestCacheDirectives {
         setOwnerName(ownerName).setGroupName(groupName).
         setMode(mode).setWeight(weight));
     
-    RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
-    CachePoolInfo info = iter.next();
+    RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
+    CachePoolInfo info = iter.next().getInfo();
     assertEquals(poolName, info.getPoolName());
     assertEquals(ownerName, info.getOwnerName());
     assertEquals(groupName, info.getGroupName());
@@ -278,7 +281,7 @@ public class TestCacheDirectives {
         setMode(mode).setWeight(weight));
 
     iter = dfs.listCachePools();
-    info = iter.next();
+    info = iter.next().getInfo();
     assertEquals(poolName, info.getPoolName());
     assertEquals(ownerName, info.getOwnerName());
     assertEquals(groupName, info.getGroupName());
@@ -507,9 +510,9 @@ public class TestCacheDirectives {
         .setGroupName(groupName)
         .setMode(mode)
         .setWeight(weight));
-    RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
+    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
     assertTrue("No cache pools found", pit.hasNext());
-    CachePoolInfo info = pit.next();
+    CachePoolInfo info = pit.next().getInfo();
     assertEquals(pool, info.getPoolName());
     assertEquals(groupName, info.getGroupName());
     assertEquals(mode, info.getMode());
@@ -520,10 +523,14 @@ public class TestCacheDirectives {
     int numEntries = 10;
     String entryPrefix = "/party-";
     long prevId = -1;
+    final Date expiry = new Date();
     for (int i=0; i<numEntries; i++) {
       prevId = dfs.addCacheDirective(
           new CacheDirectiveInfo.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).build());
+            setPath(new Path(entryPrefix + i)).setPool(pool).
+            setExpiration(
+                CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
+            build());
     }
     RemoteIterator<CacheDirectiveEntry> dit
         = dfs.listCacheDirectives(null);
@@ -542,7 +549,7 @@ public class TestCacheDirectives {
     // Check that state came back up
     pit = dfs.listCachePools();
     assertTrue("No cache pools found", pit.hasNext());
-    info = pit.next();
+    info = pit.next().getInfo();
     assertEquals(pool, info.getPoolName());
     assertEquals(pool, info.getPoolName());
     assertEquals(groupName, info.getGroupName());
@@ -557,6 +564,7 @@ public class TestCacheDirectives {
       assertEquals(i+1, cd.getId().longValue());
       assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
       assertEquals(pool, cd.getPool());
+      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
     }
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
 
@@ -713,7 +721,16 @@ public class TestCacheDirectives {
     try {
       cluster.waitActive();
       DistributedFileSystem dfs = cluster.getFileSystem();
-      NameNode namenode = cluster.getNameNode();
+      final NameNode namenode = cluster.getNameNode();
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return ((namenode.getNamesystem().getCacheCapacity() ==
+              (NUM_DATANODES * CACHE_CAPACITY)) &&
+                (namenode.getNamesystem().getCacheUsed() == 0));
+        }
+      }, 500, 60000);
+
       NamenodeProtocols nnRpc = namenode.getRpcServer();
       Path rootDir = helper.getDefaultWorkingDirectory(dfs);
       // Create the pool
@@ -967,8 +984,8 @@ public class TestCacheDirectives {
     dfs.addCachePool(new CachePoolInfo(poolName)
         .setMode(new FsPermission((short)0700)));
     // Should only see partial info
-    RemoteIterator<CachePoolInfo> it = myDfs.listCachePools();
-    CachePoolInfo info = it.next();
+    RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
+    CachePoolInfo info = it.next().getInfo();
     assertFalse(it.hasNext());
     assertEquals("Expected pool name", poolName, info.getPoolName());
     assertNull("Unexpected owner name", info.getOwnerName());
@@ -981,7 +998,7 @@ public class TestCacheDirectives {
         .setWeight(99));
     // Should see full info
     it = myDfs.listCachePools();
-    info = it.next();
+    info = it.next().getInfo();
     assertFalse(it.hasNext());
     assertEquals("Expected pool name", poolName, info.getPoolName());
     assertEquals("Mismatched owner name", myUser.getShortUserName(),
@@ -991,4 +1008,58 @@ public class TestCacheDirectives {
         info.getMode().toShort());
     assertEquals("Mismatched weight", 99, (int)info.getWeight());
   }
+
+  @Test(timeout=60000)
+  public void testExpiry() throws Exception {
+    HdfsConfiguration conf = createCachingConf();
+    MiniDFSCluster cluster =
+      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
+    try {
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      String pool = "pool1";
+      dfs.addCachePool(new CachePoolInfo(pool));
+      Path p = new Path("/mypath");
+      DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
+      // Expire after test timeout
+      Date start = new Date();
+      Date expiry = DateUtils.addSeconds(start, 120);
+      final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+          .setPath(p)
+          .setPool(pool)
+          .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
+          .setReplication((short)2)
+          .build());
+      waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
+      // Change it to expire sooner
+      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
+          .setExpiration(Expiration.newRelative(0)).build());
+      waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
+      RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
+      CacheDirectiveEntry ent = it.next();
+      assertFalse(it.hasNext());
+      Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
+      assertTrue("Directive should have expired",
+          entryExpiry.before(new Date()));
+      // Change it back to expire later
+      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
+          .setExpiration(Expiration.newRelative(120000)).build());
+      waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
+      it = dfs.listCacheDirectives(null);
+      ent = it.next();
+      assertFalse(it.hasNext());
+      entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
+      assertTrue("Directive should not have expired",
+          entryExpiry.after(new Date()));
+      // Verify that setting a negative TTL throws an error
+      try {
+        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
+            .setExpiration(Expiration.newRelative(-1)).build());
+      } catch (InvalidRequestException e) {
+        GenericTestUtils
+            .assertExceptionContains("Cannot set a negative expiration", e);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
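
For readers following the new expiration API, a minimal client-side sketch mirroring the
calls made in testExpiry above; the pool name, path, and one-hour TTL are illustrative.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheExpiryExample {
  static long cacheForOneHour(DistributedFileSystem dfs, Path path)
      throws Exception {
    dfs.addCachePool(new CachePoolInfo("example-pool"));
    // Relative expirations are resolved to an absolute time on the NameNode;
    // once it passes, the directive no longer holds blocks in the cache
    // (as the listCacheDirectives checks in testExpiry show).
    return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(path)
        .setPool("example-pool")
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000))
        .build());
  }
}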

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java Mon Dec  2 17:41:44 2013
@@ -20,64 +20,47 @@ package org.apache.hadoop.hdfs.server.na
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 
-import java.io.IOException;
-import java.io.OutputStream;
+import java.io.ByteArrayInputStream;
+import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.EnumMap;
 
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.util.Holder;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestEditLogFileInputStream {
   private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
 
   @Test
   public void testReadURL() throws Exception {
-    // Start a simple web server which hosts the log data.
-    HttpServer server = new HttpServer.Builder().setName("test")
-        .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build();
-    server.start();
-    try {
-      server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class);
-      URL url = new URL("http://localhost:" + server.getPort() + "/fakeLog");
-      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
-          url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
-          false);
-      // Read the edit log and verify that we got all of the data.
-      EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
-          FSImageTestUtil.countEditLogOpTypes(elis);
-      assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
-      assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
-      assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
-
-      // Check that length header was picked up.
-      assertEquals(FAKE_LOG_DATA.length, elis.length());
-      elis.close();
-    } finally {
-      server.stop();
-    }
-  }
-
-  @SuppressWarnings("serial")
-  public static class FakeLogServlet extends HttpServlet {
-    @Override
-    public void doGet(HttpServletRequest request, 
-                      HttpServletResponse response
-                      ) throws ServletException, IOException {
-      response.setHeader("Content-Length",
-          String.valueOf(FAKE_LOG_DATA.length));
-      OutputStream out = response.getOutputStream();
-      out.write(FAKE_LOG_DATA);
-      out.close();
-    }
+    HttpURLConnection conn = mock(HttpURLConnection.class);
+    doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
+    doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
+    doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");
+
+    URLConnectionFactory factory = mock(URLConnectionFactory.class);
+    doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
+        anyBoolean());
+
+    URL url = new URL("http://localhost/fakeLog");
+    EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
+        HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
+    // Read the edit log and verify that we got all of the data.
+    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
+        .countEditLogOpTypes(elis);
+    assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
+    assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
+    assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
+
+    // Check that length header was picked up.
+    assertEquals(FAKE_LOG_DATA.length, elis.length());
+    elis.close();
   }
-
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Mon Dec  2 17:41:44 2013
@@ -26,24 +26,27 @@ import java.io.IOException;
 
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
  * Test the EditLogFileOutputStream
  */
 public class TestEditLogFileOutputStream {
-  private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class);
-  private static final File TEST_EDITS =
-      new File(TEST_DIR, "testEditLogFileOutput.log");
-  final static int MIN_PREALLOCATION_LENGTH =
-      EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
+  private final static File TEST_DIR = PathUtils
+      .getTestDir(TestEditLogFileOutputStream.class);
+  private static final File TEST_EDITS = new File(TEST_DIR,
+      "testEditLogFileOutput.log");
+  final static int MIN_PREALLOCATION_LENGTH = EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
 
   private Configuration conf;
 
-  static {
+  @BeforeClass
+  public static void disableFsync() {
     // No need to fsync for the purposes of tests. This makes
     // the tests run much faster.
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
@@ -52,7 +55,8 @@ public class TestEditLogFileOutputStream
   @Before
   @After
   public void deleteEditsFile() {
-    if (TEST_EDITS.exists()) TEST_EDITS.delete();
+    if (TEST_EDITS.exists())
+      TEST_EDITS.delete();
   }
 
   @Before
@@ -66,17 +70,17 @@ public class TestEditLogFileOutputStream
     elos.flushAndSync(true);
     assertEquals(expectedLength, elos.getFile().length());
   }
-  
+
   /**
-   * Tests writing to the EditLogFileOutputStream.  Due to preallocation, the
+   * Tests writing to the EditLogFileOutputStream. Due to preallocation, the
    * length of the edit log will usually be longer than its valid contents.
    */
   @Test
   public void testRawWrites() throws IOException {
-    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, TEST_EDITS,
-      0);
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     try {
-      byte[] small = new byte[] {1,2,3,4,5,8,7};
+      byte[] small = new byte[] { 1, 2, 3, 4, 5, 8, 7 };
       elos.create();
       // The first (small) write we make extends the file by 1 MB due to
       // preallocation.
@@ -101,7 +105,8 @@ public class TestEditLogFileOutputStream
       }
       flushAndCheckLength(elos, 4 * MIN_PREALLOCATION_LENGTH);
     } finally {
-      if (elos != null) elos.close();
+      if (elos != null)
+        elos.close();
     }
   }
 
@@ -112,8 +117,8 @@ public class TestEditLogFileOutputStream
   @Test
   public void testEditLogFileOutputStreamCloseAbort() throws IOException {
     // abort after a close should just ignore
-    EditLogFileOutputStream editLogStream =
-      new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+    EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     editLogStream.close();
     editLogStream.abort();
   }
@@ -125,8 +130,8 @@ public class TestEditLogFileOutputStream
   @Test
   public void testEditLogFileOutputStreamCloseClose() throws IOException {
     // close after a close should result in an IOE
-    EditLogFileOutputStream editLogStream =
-      new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+    EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     editLogStream.close();
     try {
       editLogStream.close();
@@ -135,7 +140,7 @@ public class TestEditLogFileOutputStream
       assertTrue(msg, msg.contains("Trying to use aborted output stream"));
     }
   }
-  
+
   /**
    * Tests EditLogFileOutputStream doesn't throw NullPointerException on being
    * abort/abort sequence. See HDFS-2011.
@@ -143,9 +148,13 @@ public class TestEditLogFileOutputStream
   @Test
   public void testEditLogFileOutputStreamAbortAbort() throws IOException {
     // abort after a close should just ignore
-    EditLogFileOutputStream editLogStream =
-      new EditLogFileOutputStream(conf, TEST_EDITS, 0);
-    editLogStream.abort();
-    editLogStream.abort();
+    EditLogFileOutputStream editLogStream = null;
+    try {
+      editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+      editLogStream.abort();
+      editLogStream.abort();
+    } finally {
+      IOUtils.cleanup(null, editLogStream);
+    }
   }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Mon Dec  2 17:41:44 2013
@@ -158,7 +158,7 @@ public class TestFSImageWithSnapshot {
     try {
       loader.load(imageFile);
       FSImage.updateCountForQuota(
-          (INodeDirectoryWithQuota)fsn.getFSDirectory().getINode("/"));
+          INodeDirectory.valueOf(fsn.getFSDirectory().getINode("/"), "/"));
     } finally {
       fsn.getFSDirectory().writeUnlock();
       fsn.writeUnlock();

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Mon Dec  2 17:41:44 2013
@@ -49,7 +49,7 @@ public class TestFsLimits {
   static PermissionStatus perms
     = new PermissionStatus("admin", "admin", FsPermission.getDefault());
 
-  static INodeDirectoryWithQuota rootInode;
+  static INodeDirectory rootInode;
 
   static private FSNamesystem getMockNamesystem() {
     FSNamesystem fsn = mock(FSNamesystem.class);
@@ -75,8 +75,8 @@ public class TestFsLimits {
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
 
-    rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
-        .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
+    rootInode = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+        INodeDirectory.ROOT_NAME, perms, 0L);
     inodes = new INode[]{ rootInode, null };
     fs = null;
     fsIsReady = true;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Mon Dec  2 17:41:44 2013
@@ -1058,4 +1058,31 @@ public class TestFsck {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /**
+   * Test for including the snapshot files in fsck report
+   */
+  @Test
+  public void testFsckForSnapshotFiles() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    try {
+      String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
+          "-files");
+      assertTrue(runFsck.contains("HEALTHY"));
+      final String fileName = "/srcdat";
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      Path file1 = new Path(fileName);
+      DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
+      hdfs.allowSnapshot(new Path("/"));
+      hdfs.createSnapshot(new Path("/"), "mySnapShot");
+      runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
+      assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
+      runFsck = runFsck(conf, 0, true, "/", "-files");
+      assertFalse(runFsck.contains("mySnapShot"));
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Dec  2 17:41:44 2013
@@ -211,9 +211,9 @@ public class TestINodeFile {
       // Call FSDirectory#unprotectedSetQuota which calls
       // INodeDirectory#replaceChild
       dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
-      INode dirNode = fsdir.getINode(dir.toString());
+      INodeDirectory dirNode = getDir(fsdir, dir);
       assertEquals(dir.toString(), dirNode.getFullPathName());
-      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      assertTrue(dirNode.isWithQuota());
       
       final Path newDir = new Path("/newdir");
       final Path newFile = new Path(newDir, "file");
@@ -871,6 +871,12 @@ public class TestINodeFile {
     }
   }
   
+  private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
+      throws IOException {
+    final String dirStr = dir.toString();
+    return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
+  }
+
   /**
    * Test whether the inode in inodeMap has been replaced after regular inode
    * replacement
@@ -887,21 +893,20 @@ public class TestINodeFile {
 
       final Path dir = new Path("/dir");
       hdfs.mkdirs(dir);
-      INode dirNode = fsdir.getINode(dir.toString());
+      INodeDirectory dirNode = getDir(fsdir, dir);
       INode dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);
 
       // set quota to dir, which leads to node replacement
       hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
-      dirNode = fsdir.getINode(dir.toString());
-      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      dirNode = getDir(fsdir, dir);
+      assertTrue(dirNode.isWithQuota());
       // the inode in inodeMap should also be replaced
       dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);
 
       hdfs.setQuota(dir, -1, -1);
-      dirNode = fsdir.getINode(dir.toString());
-      assertTrue(dirNode instanceof INodeDirectory);
+      dirNode = getDir(fsdir, dir);
       // the inode in inodeMap should also be replaced
       dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Mon Dec  2 17:41:44 2013
@@ -31,7 +31,10 @@ import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
@@ -46,10 +49,16 @@ public class TestNameNodeMXBean {
    */
   private static final double DELTA = 0.000001;
 
-  @SuppressWarnings({ "unchecked", "deprecation" })
+  static {
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
+  }
+
+  @SuppressWarnings({ "unchecked" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     MiniDFSCluster cluster = null;
 
     try {
@@ -152,7 +161,7 @@ public class TestNameNodeMXBean {
       assertEquals(0, statusMap.get("failed").size());
       
       // This will cause the first dir to fail.
-      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
+      File failedNameDir = new File(nameDirUris.iterator().next());
       assertEquals(0, FileUtil.chmod(
         new File(failedNameDir, "current").getAbsolutePath(), "000"));
       cluster.getNameNodeRpc().rollEditLog();
@@ -171,6 +180,10 @@ public class TestNameNodeMXBean {
       }
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
+      assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
+      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * 
+          cluster.getDataNodes().size(),
+              mbs.getAttribute(mxbeanName, "CacheCapacity"));
     } finally {
       if (cluster != null) {
         for (URI dir : cluster.getNameDirs(0)) {
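
The new CacheUsed/CacheCapacity assertions read the NameNode's NameNodeInfo MXBean through JMX. A hedged sketch of the same lookup, run in the same JVM as the NameNode (for example from a MiniDFSCluster test); the bean name is assumed here to be the one the NameNode registers, Hadoop:service=NameNode,name=NameNodeInfo:

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class NameNodeCacheMetricsSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // CacheCapacity reflects the datanodes' aggregate mlock limit,
        // CacheUsed the bytes currently cached.
        System.out.println("CacheUsed = " + mbs.getAttribute(mxbeanName, "CacheUsed"));
        System.out.println("CacheCapacity = " + mbs.getAttribute(mxbeanName, "CacheCapacity"));
      }
    }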

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Mon Dec  2 17:41:44 2013
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -905,7 +906,7 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
         if (iter.hasNext()) {
           return true;
         }
@@ -942,8 +943,8 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
-        if (iter.hasNext() && iter.next().getWeight() == 99) {
+        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
+        if (iter.hasNext() && iter.next().getInfo().getWeight() == 99) {
           return true;
         }
         Thread.sleep(1000);
@@ -979,7 +980,7 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
         if (!iter.hasNext()) {
           return true;
         }
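
listCachePools() now yields CachePoolEntry objects, so callers reach the pool metadata through getInfo() rather than directly, as the updated checks above show. A small sketch of the new iteration pattern; dfs is assumed to be an already-open DistributedFileSystem:

    import java.io.IOException;

    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class ListCachePoolsSketch {
      static void printPools(DistributedFileSystem dfs) throws IOException {
        RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
        while (iter.hasNext()) {
          // The pool name and weight now live on the CachePoolInfo inside each entry.
          CachePoolInfo info = iter.next().getInfo();
          System.out.println(info.getPoolName() + " weight=" + info.getWeight());
        }
      }
    }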

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Mon Dec  2 17:41:44 2013
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
@@ -1190,13 +1189,15 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(bar_s2));
     restartClusterAndCheckImage(true);
     // make sure the whole referred subtree has been destroyed
-    assertEquals(4, fsdir.getRoot().getNamespace());
-    assertEquals(0, fsdir.getRoot().getDiskspace());
+    Quota.Counts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(4, q.get(Quota.NAMESPACE));
+    assertEquals(0, q.get(Quota.DISKSPACE));
     
     hdfs.deleteSnapshot(sdir1, "s1");
     restartClusterAndCheckImage(true);
-    assertEquals(3, fsdir.getRoot().getNamespace());
-    assertEquals(0, fsdir.getRoot().getDiskspace());
+    q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(3, q.get(Quota.NAMESPACE));
+    assertEquals(0, q.get(Quota.DISKSPACE));
   }
   
   /**
@@ -1938,10 +1939,12 @@ public class TestRenameWithSnapshots {
     // check
     final INodeDirectorySnapshottable dir1Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
-    assertEquals(4, dir1Node.getNamespace());
+    Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(4, q1.get(Quota.NAMESPACE));
     final INodeDirectorySnapshottable dir2Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
-    assertEquals(2, dir2Node.getNamespace());
+    Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(2, q2.get(Quota.NAMESPACE));
     
     final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
         foo.getName());
@@ -2005,10 +2008,12 @@ public class TestRenameWithSnapshots {
     final INodeDirectorySnapshottable dir1Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
     // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
-    assertEquals(9, dir1Node.getNamespace());
+    Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(9, q1.get(Quota.NAMESPACE));
     final INodeDirectorySnapshottable dir2Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
-    assertEquals(2, dir2Node.getNamespace());
+    Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(2, q2.get(Quota.NAMESPACE));
     
     final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
         foo.getName());

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Mon Dec  2 17:41:44 2013
@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -157,15 +156,21 @@ public class TestSnapshotDeletion {
     hdfs.delete(dir, true);
   }
   
+  private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
+      throws IOException {
+    final String dirStr = dir.toString();
+    return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
+  }
+
   private void checkQuotaUsageComputation(final Path dirPath,
       final long expectedNs, final long expectedDs) throws IOException {
-    INode node = fsdir.getINode(dirPath.toString());
-    assertTrue(node.isDirectory() && node.isQuotaSet());
-    INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node;
+    INodeDirectory dirNode = getDir(fsdir, dirPath);
+    assertTrue(dirNode.isQuotaSet());
+    Quota.Counts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
-        dirNode.getNamespace());
+        q.get(Quota.NAMESPACE));
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        dirNode.getDiskspace());
+        q.get(Quota.DISKSPACE));
     Quota.Counts counts = Quota.Counts.newInstance();
     dirNode.computeQuotaUsage(counts, false);
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
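
With INodeDirectoryWithQuota removed, quota usage is read off the DirectoryWithQuotaFeature attached to a plain INodeDirectory, as the rewritten checkQuotaUsageComputation shows. The same pattern, condensed into a sketch; these are NameNode-internal classes (the same ones the test uses), so this only compiles inside the hadoop-hdfs project:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
    import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
    import org.apache.hadoop.hdfs.server.namenode.Quota;

    class QuotaUsageSketch {
      /** Returns {namespace, diskspace} consumed under a quota-enabled directory. */
      static long[] consumed(FSDirectory fsdir, String path) throws IOException {
        // valueOf throws if the resolved inode is not a directory.
        INodeDirectory dir = INodeDirectory.valueOf(fsdir.getINode(path), path);
        if (!dir.isQuotaSet()) {
          throw new IllegalStateException(path + " has no quota feature attached");
        }
        Quota.Counts used = dir.getDirectoryWithQuotaFeature().getSpaceConsumed();
        return new long[] { used.get(Quota.NAMESPACE), used.get(Quota.DISKSPACE) };
      }
    }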

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java Mon Dec  2 17:41:44 2013
@@ -92,12 +92,15 @@ public class TestSnapshotDiffReport {
     Path file11 = new Path(modifyDir, "file11");
     Path file12 = new Path(modifyDir, "file12");
     Path file13 = new Path(modifyDir, "file13");
+    Path link13 = new Path(modifyDir, "link13");
     Path file14 = new Path(modifyDir, "file14");
     Path file15 = new Path(modifyDir, "file15");
     DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
     DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
     DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
     DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
+    // create link13
+    hdfs.createSymlink(file13, link13, false);
     // create snapshot
     for (Path snapshotDir : snapshotDirs) {
       hdfs.allowSnapshot(snapshotDir);
@@ -110,6 +113,8 @@ public class TestSnapshotDiffReport {
     hdfs.setReplication(file12, REPLICATION);
     // modify file13
     hdfs.setReplication(file13, REPLICATION);
+    // delete link13
+    hdfs.delete(link13, false);
     // create file14
     DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
     // create file15
@@ -126,6 +131,8 @@ public class TestSnapshotDiffReport {
     hdfs.delete(file12, true);
     // modify file13
     hdfs.setReplication(file13, (short) (REPLICATION - 2));
+    // create link13 again
+    hdfs.createSymlink(file13, link13, false);
     // delete file14
     hdfs.delete(file14, true);
     // modify file15
@@ -222,7 +229,9 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
         new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
         new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
-        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")));
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
+        new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
+        new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
 
     verifyDiffReport(sub1, "s0", "s5", 
         new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
@@ -232,6 +241,8 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
         new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
         new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
+        new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
+        new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")),
         new DiffReportEntry(DiffType.MODIFY,
             DFSUtil.string2Bytes("subsub1/subsubsub1")),
         new DiffReportEntry(DiffType.CREATE,
@@ -241,6 +252,8 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
         new DiffReportEntry(DiffType.CREATE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
+        new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
     
     verifyDiffReport(sub1, "s2", "s5",
@@ -254,6 +267,8 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
         new DiffReportEntry(DiffType.CREATE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
+        new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
     
     verifyDiffReport(sub1, "s3", "",
@@ -270,7 +285,11 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
         new DiffReportEntry(DiffType.MODIFY,
-            DFSUtil.string2Bytes("subsub1/subsubsub1/file13")));
+            DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
+        new DiffReportEntry(DiffType.CREATE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
+        new DiffReportEntry(DiffType.DELETE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
   }
   
   /**
@@ -300,7 +319,11 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.CREATE,
             DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
         new DiffReportEntry(DiffType.MODIFY,
-            DFSUtil.string2Bytes("subsub1/subsubsub1/file13")));
+            DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
+        new DiffReportEntry(DiffType.CREATE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
+        new DiffReportEntry(DiffType.DELETE,
+            DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
     // check diff report between s0 and the current status
     verifyDiffReport(sub1, "s0", "", 
         new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
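
The diff-report changes above fold symlink create/delete events into the snapshot diff. A short sketch of producing such a report from client code, assuming dfs is a DistributedFileSystem and the directory can be made snapshottable; snapshot names are arbitrary:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    class SnapshotDiffSketch {
      static void report(DistributedFileSystem dfs, Path dir) throws Exception {
        dfs.allowSnapshot(dir);
        dfs.createSnapshot(dir, "s0");
        // ... mutate the tree here: create files, symlinks, deletes ...
        dfs.createSnapshot(dir, "s1");
        SnapshotDiffReport report = dfs.getSnapshotDiffReport(dir, "s0", "s1");
        // The report lists one entry per changed path between the two snapshots.
        System.out.println(report);
      }
    }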

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java Mon Dec  2 17:41:44 2013
@@ -305,7 +305,7 @@ public class TestDiff {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
-    final INodeDirectory newinode = new INodeDirectory(oldinode, false);
+    final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
     newinode.setModificationTime(oldinode.getModificationTime() + 1);
 
     current.set(i, newinode);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java Mon Dec  2 17:41:44 2013
@@ -34,10 +34,7 @@ public final class TestURLConnectionFact
   public void testConnConfiguratior() throws IOException {
     final URL u = new URL("http://localhost");
     final List<HttpURLConnection> conns = Lists.newArrayList();
-    URLConnectionFactory fc = new URLConnectionFactory(
-        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
-
-    fc.setConnConfigurator(new ConnectionConfigurator() {
+    URLConnectionFactory fc = new URLConnectionFactory(new ConnectionConfigurator() {
       @Override
       public HttpURLConnection configure(HttpURLConnection conn)
           throws IOException {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java Mon Dec  2 17:41:44 2013
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
+import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -41,6 +42,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -66,7 +68,14 @@ public class TestWebHdfsTimeouts {
   private InetSocketAddress nnHttpAddress;
   private ServerSocket serverSocket;
   private Thread serverThread;
-  private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT);
+  private URLConnectionFactory connectionFactory = new URLConnectionFactory(new ConnectionConfigurator() {
+    @Override
+    public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
+      conn.setReadTimeout(SHORT_SOCKET_TIMEOUT);
+      conn.setConnectTimeout(SHORT_SOCKET_TIMEOUT);
+      return conn;
+    }
+  });
 
   @Before
   public void setUp() throws Exception {
@@ -82,7 +91,6 @@ public class TestWebHdfsTimeouts {
 
   @After
   public void tearDown() throws Exception {
-    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
     IOUtils.cleanup(LOG, fs);
     if (serverSocket != null) {
@@ -242,7 +250,7 @@ public class TestWebHdfsTimeouts {
    */
   private void startSingleTemporaryRedirectResponseThread(
       final boolean consumeConnectionBacklog) {
-    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
     serverThread = new Thread() {
       @Override
       public void run() {
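
The timeout test now injects its short timeouts through a ConnectionConfigurator passed to the URLConnectionFactory constructor instead of a socket-timeout argument. The same pattern, sketched; this assumes the URLConnectionFactory(ConnectionConfigurator) constructor is reachable from the caller (the tests above sit in the org.apache.hadoop.hdfs.web package), and the timeout value is arbitrary:

    import java.io.IOException;
    import java.net.HttpURLConnection;

    import org.apache.hadoop.hdfs.web.URLConnectionFactory;
    import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;

    class TimeoutFactorySketch {
      static URLConnectionFactory withTimeout(final int timeoutMs) {
        return new URLConnectionFactory(new ConnectionConfigurator() {
          @Override
          public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
            // Apply both connect and read timeouts to every connection the factory opens.
            conn.setConnectTimeout(timeoutMs);
            conn.setReadTimeout(timeoutMs);
            return conn;
          }
        });
      }
    }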

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java Mon Dec  2 17:41:44 2013
@@ -83,7 +83,7 @@ public class TestDelegationTokenRemoteFe
 
   private static final String EXP_DATE = "124123512361236";
   private static final String tokenFile = "http.file.dta";
-  private static final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  private static final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
 
   private int httpPort;
   private URI serviceUrl;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1547122&r1=1547121&r2=1547122&view=diff
==============================================================================
Binary files - no diff available.