Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/10/13 22:34:51 UTC

[01/51] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ef84ac469 -> 841742cdd
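
The heart of this refactor, visible throughout the diff below, is that each
test used to build its own MiniDFSCluster inside a try/finally block and shut
it down by hand; the patch moves the shared Configuration and MiniDFSCluster
into fields managed by JUnit @Before/@After methods. A minimal sketch of that
pattern, using an illustrative class and test name rather than the actual
TestFsck code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Illustrative sketch of the lifecycle pattern used by this patch;
    // the class and test names are hypothetical, not part of TestFsck.
    public class ExampleClusterTest {
      private Configuration conf;
      private MiniDFSCluster cluster;

      @Before
      public void setUp() throws Exception {
        // Each test starts from a fresh Configuration and tunes it
        // before building its own cluster.
        conf = new Configuration();
      }

      @After
      public void tearDown() throws Exception {
        // Shared teardown replaces the per-test try/finally blocks.
        if (cluster != null) {
          cluster.shutdown();
        }
      }

      @Test
      public void testClusterComesUp() throws Exception {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        // ... assertions against cluster.getFileSystem() go here ...
      }
    }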


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3059b251/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4b7eebd..aa41e9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -57,8 +57,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.base.Supplier;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
@@ -74,7 +77,6 @@ import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
@@ -116,44 +118,49 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
 
 /**
- * A JUnit test for doing fsck
+ * A JUnit test for doing fsck.
  */
 public class TestFsck {
+  private static final Log LOG =
+      LogFactory.getLog(TestFsck.class.getName());
+
   static final String AUDITLOG_FILE =
       GenericTestUtils.getTempPath("TestFsck-audit.log");
   
   // Pattern for: 
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
-  static final Pattern fsckPattern = Pattern.compile(
+  static final Pattern FSCK_PATTERN = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + 
       "perm=null\\s" + "proto=.*");
-  static final Pattern getfileinfoPattern = Pattern.compile(
+  static final Pattern GET_FILE_INFO_PATTERN = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" + 
       "perm=null\\s" + "proto=.*");
 
-  static final Pattern numMissingBlocksPattern = Pattern.compile(
+  static final Pattern NUM_MISSING_BLOCKS_PATTERN = Pattern.compile(
       ".*Missing blocks:\t\t([0123456789]*).*");
 
-  static final Pattern numCorruptBlocksPattern = Pattern.compile(
+  static final Pattern NUM_CORRUPT_BLOCKS_PATTERN = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
   
   private static final String LINE_SEPARATOR =
-    System.getProperty("line.separator");
+      System.getProperty("line.separator");
 
   static String runFsck(Configuration conf, int expectedErrCode, 
-                        boolean checkErrorCode,String... path)
+                        boolean checkErrorCode, String... path)
                         throws Exception {
     ByteArrayOutputStream bStream = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(bStream, true);
@@ -163,60 +170,72 @@ public class TestFsck {
       assertEquals(expectedErrCode, errCode);
     }
     GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
-    FSImage.LOG.info("OUTPUT = " + bStream.toString());
+    LOG.info("OUTPUT = " + bStream.toString());
     return bStream.toString();
   }
 
-  /** do fsck */
+  private MiniDFSCluster cluster = null;
+  private Configuration conf = null;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    shutdownCluster();
+  }
+
+  private void shutdownCluster() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /** do fsck. */
   @Test
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(20).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      final long precision = 1L;
-      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      final String fileName = "/srcdat";
-      util.createFiles(fs, fileName);
-      util.waitReplication(fs, fileName, (short)3);
-      final Path file = new Path(fileName);
-      long aTime = fs.getFileStatus(file).getAccessTime();
-      Thread.sleep(precision);
-      setupAuditLogs();
-      String outStr = runFsck(conf, 0, true, "/");
-      verifyAuditLogs();
-      assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-      
-      // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster.Builder(conf)
-          .numDataNodes(0).format(false).build();
-      outStr = runFsck(conf, 1, true, "/");
-      // expect the result is corrupt
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      System.out.println(outStr);
-      
-      // bring up data nodes & cleanup cluster
-      cluster.startDataNodes(conf, 4, true, null, null);
-      cluster.waitActive();
-      cluster.waitClusterUp();
-      fs = cluster.getFileSystem();
-      util.cleanup(fs, "/srcdat");
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    final long precision = 1L;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
+        precision);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    final String fileName = "/srcdat";
+    util.createFiles(fs, fileName);
+    util.waitReplication(fs, fileName, (short)3);
+    final Path file = new Path(fileName);
+    long aTime = fs.getFileStatus(file).getAccessTime();
+    Thread.sleep(precision);
+    setupAuditLogs();
+    String outStr = runFsck(conf, 0, true, "/");
+    verifyAuditLogs();
+    assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    shutdownCluster();
+
+    // restart the cluster; bring up namenode but not the data nodes
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0).format(false).build();
+    outStr = runFsck(conf, 1, true, "/");
+    // expect the result to be corrupt
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    System.out.println(outStr);
+
+    // bring up data nodes & cleanup cluster
+    cluster.startDataNodes(conf, 4, true, null, null);
+    cluster.waitActive();
+    cluster.waitClusterUp();
+    fs = cluster.getFileSystem();
+    util.cleanup(fs, "/srcdat");
   }
 
-  /** Sets up log4j logger for auditlogs */
+  /** Sets up log4j logger for auditlogs. */
   private void setupAuditLogs() throws IOException {
     File file = new File(AUDITLOG_FILE);
     if (file.exists()) {
@@ -247,11 +266,11 @@ public class TestFsck {
         line = reader.readLine();
         assertNotNull(line);
         assertTrue("Expected getfileinfo event not found in audit log",
-            getfileinfoPattern.matcher(line).matches());
+            GET_FILE_INFO_PATTERN.matcher(line).matches());
       }
       line = reader.readLine();
       assertNotNull(line);
-      assertTrue("Expected fsck event not found in audit log", fsckPattern
+      assertTrue("Expected fsck event not found in audit log", FSCK_PATTERN
           .matcher(line).matches());
       assertNull("Unexpected event in audit log", reader.readLine());
     } finally {
@@ -270,175 +289,155 @@ public class TestFsck {
   public void testFsckNonExistent() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(20).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      util.createFiles(fs, "/srcdat");
-      util.waitReplication(fs, "/srcdat", (short)3);
-      String outStr = runFsck(conf, 0, true, "/non-existent");
-      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
-      System.out.println(outStr);
-      util.cleanup(fs, "/srcdat");
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    util.createFiles(fs, "/srcdat");
+    util.waitReplication(fs, "/srcdat", (short)3);
+    String outStr = runFsck(conf, 0, true, "/non-existent");
+    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
+    System.out.println(outStr);
+    util.cleanup(fs, "/srcdat");
   }
 
-  /** Test fsck with permission set on inodes */
+  /** Test fsck with permission set on inodes. */
   @Test
   public void testFsckPermission() throws Exception {
     final DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(20).build();
-    final Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
-    MiniDFSCluster cluster = null;
-    try {
-      // Create a cluster with the current user, write some files
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      final MiniDFSCluster c2 = cluster;
-      final String dir = "/dfsck";
-      final Path dirpath = new Path(dir);
-      final FileSystem fs = c2.getFileSystem();
-
-      util.createFiles(fs, dir);
-      util.waitReplication(fs, dir, (short) 3);
-      fs.setPermission(dirpath, new FsPermission((short) 0700));
-
-      // run DFSck as another user, should fail with permission issue
-      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
-          "ProbablyNotARealUserName", new String[] { "ShangriLa" });
-      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          System.out.println(runFsck(conf, -1, true, dir));
-          return null;
-        }
-      });
-      
-      // set permission and try DFSck again as the fake user, should succeed
-      fs.setPermission(dirpath, new FsPermission((short) 0777));
-      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          final String outStr = runFsck(conf, 0, true, dir);
-          System.out.println(outStr);
-          assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-          return null;
-        }
-      });
+    // Create a cluster with the current user, write some files
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    final MiniDFSCluster c2 = cluster;
+    final String dir = "/dfsck";
+    final Path dirpath = new Path(dir);
+    final FileSystem fs = c2.getFileSystem();
+
+    util.createFiles(fs, dir);
+    util.waitReplication(fs, dir, (short) 3);
+    fs.setPermission(dirpath, new FsPermission((short) 0700));
+
+    // run DFSck as another user, should fail with permission issue
+    UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+        "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        System.out.println(runFsck(conf, -1, true, dir));
+        return null;
+      }
+    });
 
-      util.cleanup(fs, dir);
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    // set permission and try DFSck again as the fake user, should succeed
+    fs.setPermission(dirpath, new FsPermission((short) 0777));
+    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final String outStr = runFsck(conf, 0, true, dir);
+        System.out.println(outStr);
+        assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+        return null;
+      }
+    });
+
+    util.cleanup(fs, dir);
   }
 
   @Test
   public void testFsckMove() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    final int DFS_BLOCK_SIZE = 1024;
-    final int NUM_DATANODES = 4;
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE);
+    final int dfsBlockSize = 1024;
+    final int numDatanodes = 4;
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, dfsBlockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
-        (5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1), 5 * DFS_BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
+        (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).
-          numDataNodes(NUM_DATANODES).build();
-      String topDir = "/srcdat";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
-                                          cluster.getNameNodePort()), conf);
-      String fileNames[] = util.getFileNames(topDir);
-      CorruptedTestFile ctFiles[] = new CorruptedTestFile[] {
+    cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(numDatanodes).build();
+    String topDir = "/srcdat";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                        cluster.getNameNodePort()), conf);
+    String[] fileNames = util.getFileNames(topDir);
+    CorruptedTestFile[] ctFiles = new CorruptedTestFile[]{
         new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
+            dfsClient, numDatanodes, dfsBlockSize),
         new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
-          dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE)
-      };
-      int totalMissingBlocks = 0;
-      for (CorruptedTestFile ctFile : ctFiles) {
-        totalMissingBlocks += ctFile.getTotalMissingBlocks();
-      }
-      for (CorruptedTestFile ctFile : ctFiles) {
-        ctFile.removeBlocks(cluster);
-      }
-      // Wait for fsck to discover all the missing blocks
-      while (true) {
-        outStr = runFsck(conf, 1, false, "/");
-        String numMissing = null;
-        String numCorrupt = null;
-        for (String line : outStr.split(LINE_SEPARATOR)) {
-          Matcher m = numMissingBlocksPattern.matcher(line);
-          if (m.matches()) {
-            numMissing = m.group(1);
-          }
-          m = numCorruptBlocksPattern.matcher(line);
-          if (m.matches()) {
-            numCorrupt = m.group(1);
-          }
-          if (numMissing != null && numCorrupt != null) {
-            break;
-          }
+            dfsClient, numDatanodes, dfsBlockSize)
+    };
+    int totalMissingBlocks = 0;
+    for (CorruptedTestFile ctFile : ctFiles) {
+      totalMissingBlocks += ctFile.getTotalMissingBlocks();
+    }
+    for (CorruptedTestFile ctFile : ctFiles) {
+      ctFile.removeBlocks(cluster);
+    }
+    // Wait for fsck to discover all the missing blocks
+    while (true) {
+      outStr = runFsck(conf, 1, false, "/");
+      String numMissing = null;
+      String numCorrupt = null;
+      for (String line : outStr.split(LINE_SEPARATOR)) {
+        Matcher m = NUM_MISSING_BLOCKS_PATTERN.matcher(line);
+        if (m.matches()) {
+          numMissing = m.group(1);
         }
-        if (numMissing == null || numCorrupt == null) {
-          throw new IOException("failed to find number of missing or corrupt" +
-              " blocks in fsck output.");
+        m = NUM_CORRUPT_BLOCKS_PATTERN.matcher(line);
+        if (m.matches()) {
+          numCorrupt = m.group(1);
         }
-        if (numMissing.equals(Integer.toString(totalMissingBlocks))) {
-          assertTrue(numCorrupt.equals(Integer.toString(0)));
-          assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        if (numMissing != null && numCorrupt != null) {
           break;
         }
-        try {
-          Thread.sleep(100);
-        } catch (InterruptedException ignore) {
-        }
       }
-
-      // Copy the non-corrupt blocks of corruptFileName to lost+found.
-      outStr = runFsck(conf, 1, false, "/", "-move");
-      FSImage.LOG.info("WATERMELON: outStr = " + outStr);
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-
-      // Make sure that we properly copied the block files from the DataNodes
-      // to lost+found
-      for (CorruptedTestFile ctFile : ctFiles) {
-        ctFile.checkSalvagedRemains();
+      if (numMissing == null || numCorrupt == null) {
+        throw new IOException("failed to find number of missing or corrupt" +
+            " blocks in fsck output.");
+      }
+      if (numMissing.equals(Integer.toString(totalMissingBlocks))) {
+        assertTrue(numCorrupt.equals(Integer.toString(0)));
+        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+        break;
       }
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ignore) {
+      }
+    }
 
-      // Fix the filesystem by removing corruptFileName
-      outStr = runFsck(conf, 1, true, "/", "-delete");
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      
-      // Check to make sure we have a healthy filesystem
-      outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
-      util.cleanup(fs, topDir);
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+    // Copy the non-corrupt blocks of corruptFileName to lost+found.
+    outStr = runFsck(conf, 1, false, "/", "-move");
+    LOG.info("WATERMELON: outStr = " + outStr);
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Make sure that we properly copied the block files from the DataNodes
+    // to lost+found
+    for (CorruptedTestFile ctFile : ctFiles) {
+      ctFile.checkSalvagedRemains();
     }
+
+    // Fix the filesystem by removing corruptFileName
+    outStr = runFsck(conf, 1, true, "/", "-delete");
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Check to make sure we have a healthy filesystem
+    outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    util.cleanup(fs, topDir);
   }
 
   static private class CorruptedTestFile {
@@ -449,7 +448,7 @@ public class TestFsck {
     final private int blockSize;
     final private byte[] initialContents;
     
-    public CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
+    CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
         DFSClient dfsClient, int numDataNodes, int blockSize)
             throws IOException {
       this.name = name;
@@ -505,7 +504,7 @@ public class TestFsck {
                 new FileOutputStream(blockFile, false);
             blockFileStream.write("corrupt".getBytes());
             blockFileStream.close();
-            FSImage.LOG.info("Corrupted block file " + blockFile);
+            LOG.info("Corrupted block file " + blockFile);
           }
         }
       }
@@ -536,7 +535,9 @@ public class TestFsck {
           if (blockIdx == (numBlocks - 1)) {
             // The last block might not be full-length
             len = (int)(in.getFileLength() % blockSize);
-            if (len == 0) len = blockBuffer.length;
+            if (len == 0) {
+              len = blockBuffer.length;
+            }
           }
           IOUtils.readFully(in, blockBuffer, 0, len);
           int startIdx = blockIdx * blockSize;
@@ -555,218 +556,186 @@ public class TestFsck {
   
   @Test
   public void testFsckMoveAndDelete() throws Exception {
-    final int MAX_MOVE_TRIES = 5;
+    final int maxMoveTries = 5;
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName("TestFsckMoveAndDelete").setNumFiles(5).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      String topDir = "/srcdat";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      
-      // Corrupt a block by deleting it
-      String[] fileNames = util.getFileNames(topDir);
-      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
-                                          cluster.getNameNodePort()), conf);
-      String corruptFileName = fileNames[0];
-      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
-          corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
-      for (int i=0; i<4; i++) {
-        File blockFile = cluster.getBlockFile(i, block);
-        if(blockFile != null && blockFile.exists()) {
-          assertTrue(blockFile.delete());
-        }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    String topDir = "/srcdat";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // Corrupt a block by deleting it
+    String[] fileNames = util.getFileNames(topDir);
+    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                        cluster.getNameNodePort()), conf);
+    String corruptFileName = fileNames[0];
+    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
+        corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
+    for (int i=0; i<4; i++) {
+      File blockFile = cluster.getBlockFile(i, block);
+      if(blockFile != null && blockFile.exists()) {
+        assertTrue(blockFile.delete());
       }
+    }
 
-      // We excpect the filesystem to be corrupted
-      outStr = runFsck(conf, 1, false, "/");
-      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
-        try {
-          Thread.sleep(100);
-        } catch (InterruptedException ignore) {
-        }
-        outStr = runFsck(conf, 1, false, "/");
-      } 
-      
-      // After a fsck -move, the corrupted file should still exist.
-      for (int i = 0; i < MAX_MOVE_TRIES; i++) {
-        outStr = runFsck(conf, 1, true, "/", "-move" );
-        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-        String[] newFileNames = util.getFileNames(topDir);
-        boolean found = false;
-        for (String f : newFileNames) {
-          if (f.equals(corruptFileName)) {
-            found = true;
-            break;
-          }
-        }
-        assertTrue(found);
+    // We expect the filesystem to be corrupted
+    outStr = runFsck(conf, 1, false, "/");
+    while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ignore) {
       }
+      outStr = runFsck(conf, 1, false, "/");
+    }
 
-      // Fix the filesystem by moving corrupted files to lost+found
-      outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
+    // After a fsck -move, the corrupted file should still exist.
+    for (int i = 0; i < maxMoveTries; i++) {
+      outStr = runFsck(conf, 1, true, "/", "-move");
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-      
-      // Check to make sure we have healthy filesystem
-      outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
-      util.cleanup(fs, topDir);
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+      String[] newFileNames = util.getFileNames(topDir);
+      boolean found = false;
+      for (String f : newFileNames) {
+        if (f.equals(corruptFileName)) {
+          found = true;
+          break;
+        }
+      }
+      assertTrue(found);
     }
+
+    // Fix the filesystem by moving corrupted files to lost+found
+    outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+
+    // Check to make sure we have a healthy filesystem
+    outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    util.cleanup(fs, topDir);
   }
   
   @Test
   public void testFsckOpenFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
         setNumFiles(4).build();
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      String topDir = "/srcdat";
-      String randomString = "HADOOP  ";
-      fs = cluster.getFileSystem();
-      cluster.waitActive();
-      util.createFiles(fs, topDir);
-      util.waitReplication(fs, topDir, (short)3);
-      String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      // Open a file for writing and do not close for now
-      Path openFile = new Path(topDir + "/openFile");
-      FSDataOutputStream out = fs.create(openFile);
-      int writeCount = 0;
-      while (writeCount != 100) {
-        out.write(randomString.getBytes());
-        writeCount++;                  
-      }
-      ((DFSOutputStream) out.getWrappedStream()).hflush();
-      // We expect the filesystem to be HEALTHY and show one open file
-      outStr = runFsck(conf, 0, true, topDir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE")); 
-      // Use -openforwrite option to list open files
-      outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks",
-          "-locations", "-openforwrite");
-      System.out.println(outStr);
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Under Construction Block:"));
-      assertTrue(outStr.contains("openFile"));
-      // Close the file
-      out.close(); 
-      // Now, fsck should show HEALTHY fs and should not show any open files
-      outStr = runFsck(conf, 0, true, topDir);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE"));
-      assertFalse(outStr.contains("Under Construction Block:"));
-      util.cleanup(fs, topDir);
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      cluster.shutdown();
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    String topDir = "/srcdat";
+    String randomString = "HADOOP  ";
+    fs = cluster.getFileSystem();
+    cluster.waitActive();
+    util.createFiles(fs, topDir);
+    util.waitReplication(fs, topDir, (short)3);
+    String outStr = runFsck(conf, 0, true, "/");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    // Open a file for writing and do not close for now
+    Path openFile = new Path(topDir + "/openFile");
+    FSDataOutputStream out = fs.create(openFile);
+    int writeCount = 0;
+    while (writeCount != 100) {
+      out.write(randomString.getBytes());
+      writeCount++;
     }
+    ((DFSOutputStream) out.getWrappedStream()).hflush();
+    // We expect the filesystem to be HEALTHY and show one open file
+    outStr = runFsck(conf, 0, true, topDir);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    // Use -openforwrite option to list open files
+    outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks",
+        "-locations", "-openforwrite");
+    System.out.println(outStr);
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Under Construction Block:"));
+    assertTrue(outStr.contains("openFile"));
+    // Close the file
+    out.close();
+    // Now, fsck should show HEALTHY fs and should not show any open files
+    outStr = runFsck(conf, 0, true, topDir);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    assertFalse(outStr.contains("Under Construction Block:"));
+    util.cleanup(fs, topDir);
   }
 
   @Test
   public void testFsckOpenECFiles() throws Exception {
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsckECFile").
         setNumFiles(4).build();
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     ErasureCodingPolicy ecPolicy =
         ErasureCodingPolicyManager.getSystemDefaultPolicy();
     int numAllUnits = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
         numAllUnits + 1).build();
     FileSystem fs = null;
-    try {
-      String topDir = "/myDir";
-      byte[] randomBytes = new byte[3000000];
-      int seed = 42;
-      new Random(seed).nextBytes(randomBytes);
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      util.createFiles(fs, topDir);
-      // set topDir to EC when it has replicated files
-      cluster.getFileSystem().getClient().setErasureCodingPolicy(
-          topDir, ecPolicy);
-
-      // create a new file under topDir
-      DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
-      // Open a EC file for writing and do not close for now
-      Path openFile = new Path(topDir + "/openECFile");
-      FSDataOutputStream out = fs.create(openFile);
-      int writeCount = 0;
-      while (writeCount != 300) {
-        out.write(randomBytes);
-        writeCount++;
-      }
-
-      // make sure the fsck can correctly handle mixed ec/replicated files
-      runFsck(conf, 0, true, topDir, "-files", "-blocks", "-openforwrite");
-
-      // We expect the filesystem to be HEALTHY and show one open file
-      String outStr = runFsck(conf, 0, true, openFile.toString(), "-files",
-          "-blocks", "-openforwrite");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
-
-      // Use -openforwrite option to list open files
-      outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
-          "-locations", "-openforwrite", "-replicaDetails");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("OPENFORWRITE"));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Under Construction Block:"));
-
-      // Close the file
-      out.close();
-
-      // Now, fsck should show HEALTHY fs and should not show any open files
-      outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
-          "-locations", "-racks", "-replicaDetails");
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertFalse(outStr.contains("OPENFORWRITE"));
-      assertFalse(outStr.contains("Under Construction Block:"));
-      assertFalse(outStr.contains("Expected_repl=" + numAllUnits));
-      assertTrue(outStr.contains("Live_repl=" + numAllUnits));
-      util.cleanup(fs, topDir);
-    } finally {
-      if (fs != null) {
-        try {
-          fs.close();
-        } catch (Exception e) {
-        }
-      }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+    String topDir = "/myDir";
+    byte[] randomBytes = new byte[3000000];
+    int seed = 42;
+    new Random(seed).nextBytes(randomBytes);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    util.createFiles(fs, topDir);
+    // set topDir to EC when it has replicated files
+    cluster.getFileSystem().getClient().setErasureCodingPolicy(
+        topDir, ecPolicy);
+
+    // create a new file under topDir
+    DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
+    // Open an EC file for writing and do not close for now
+    Path openFile = new Path(topDir + "/openECFile");
+    FSDataOutputStream out = fs.create(openFile);
+    int writeCount = 0;
+    while (writeCount != 300) {
+      out.write(randomBytes);
+      writeCount++;
     }
+
+    // make sure the fsck can correctly handle mixed ec/replicated files
+    runFsck(conf, 0, true, topDir, "-files", "-blocks", "-openforwrite");
+
+    // We expect the filesystem to be HEALTHY and show one open file
+    String outStr = runFsck(conf, 0, true, openFile.toString(), "-files",
+        "-blocks", "-openforwrite");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
+
+    // Use -openforwrite option to list open files
+    outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
+        "-locations", "-openforwrite", "-replicaDetails");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("OPENFORWRITE"));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Expected_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Under Construction Block:"));
+
+    // Close the file
+    out.close();
+
+    // Now, fsck should show HEALTHY fs and should not show any open files
+    outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks",
+        "-locations", "-racks", "-replicaDetails");
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertFalse(outStr.contains("OPENFORWRITE"));
+    assertFalse(outStr.contains("Under Construction Block:"));
+    assertFalse(outStr.contains("Expected_repl=" + numAllUnits));
+    assertTrue(outStr.contains("Live_repl=" + numAllUnits));
+    util.cleanup(fs, topDir);
   }
 
   @Test
   public void testCorruptBlock() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
@@ -778,8 +747,6 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    MiniDFSCluster cluster = null;
-    try {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -810,7 +777,7 @@ public class TestFsck {
       IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
                         true);
     } catch (IOException ie) {
-      // Ignore exception
+      assertTrue(ie instanceof ChecksumException);
     }
 
     dfsClient = new DFSClient(new InetSocketAddress("localhost",
@@ -827,27 +794,23 @@ public class TestFsck {
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
-    assertTrue (blocks.get(0).isCorrupt());
+    assertTrue(blocks.get(0).isCorrupt());
 
     // Check if fsck reports the same
     outStr = runFsck(conf, 1, true, "/");
     System.out.println(outStr);
     assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     assertTrue(outStr.contains("testCorruptBlock"));
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
   }
 
   @Test
   public void testUnderMinReplicatedBlock() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     // Set minReplication to 2
     short minReplication=2;
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,minReplication);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, minReplication);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -855,252 +818,234 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      Path file1 = new Path("/testUnderMinReplicatedBlock");
-      DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
-      // Wait until file replication has completed
-      DFSTestUtil.waitReplication(fs, file1, minReplication);
-      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
-
-      // Make sure filesystem is in healthy state
-      outStr = runFsck(conf, 0, true, "/");
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // corrupt the first replica
-      File blockFile = cluster.getBlockFile(0, block);
-      if (blockFile != null && blockFile.exists()) {
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int) channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-      }
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    Path file1 = new Path("/testUnderMinReplicatedBlock");
+    DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
+    // Wait until file replication has completed
+    DFSTestUtil.waitReplication(fs, file1, minReplication);
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
 
-      dfsClient = new DFSClient(new InetSocketAddress("localhost",
-          cluster.getNameNodePort()), conf);
-      blocks = dfsClient.getNamenode().
-          getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
-      replicaCount = blocks.get(0).getLocations().length;
-      while (replicaCount != factor) {
+    // Make sure filesystem is in healthy state
+    outStr = runFsck(conf, 0, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // corrupt the first replica
+    File blockFile = cluster.getBlockFile(0, block);
+    if (blockFile != null && blockFile.exists()) {
+      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+      FileChannel channel = raFile.getChannel();
+      String badString = "BADBAD";
+      int rand = random.nextInt((int) channel.size()/2);
+      raFile.seek(rand);
+      raFile.write(badString.getBytes());
+      raFile.close();
+    }
+
+    dfsClient = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), conf);
+    blocks = dfsClient.getNamenode().
+        getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    replicaCount = blocks.get(0).getLocations().length;
+    while (replicaCount != factor) {
+      try {
+        Thread.sleep(100);
+        // Read the file to trigger reportBadBlocks
         try {
-          Thread.sleep(100);
-          // Read the file to trigger reportBadBlocks
-          try {
-            IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
-                true);
-          } catch (IOException ie) {
-            // Ignore exception
-          }
-          System.out.println("sleep in try: replicaCount="+replicaCount+"  factor="+factor);
-        } catch (InterruptedException ignore) {
+          IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(),
+              conf, true);
+        } catch (IOException ie) {
+          assertTrue(ie instanceof ChecksumException);
         }
-        blocks = dfsClient.getNamenode().
-            getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
-        replicaCount = blocks.get(0).getLocations().length;
+        System.out.println("sleep in try: replicaCount=" + replicaCount
+            + "  factor=" + factor);
+      } catch (InterruptedException ignore) {
       }
-
-      // Check if fsck reports the same
-      outStr = runFsck(conf, 0, true, "/");
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
-      assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
+      blocks = dfsClient.getNamenode().
+          getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
     }
+
+    // Check if fsck reports the same
+    outStr = runFsck(conf, 0, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
+    assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
   }
 
   @Test(timeout = 60000)
   public void testFsckReplicaDetails() throws Exception {
 
-    final short REPL_FACTOR = 1;
-    short NUM_DN = 1;
+    final short replFactor = 1;
+    short numDn = 1;
     final long blockSize = 512;
     final long fileSize = 1024;
     boolean checkDecommissionInProgress = false;
-    String[] racks = { "/rack1" };
-    String[] hosts = { "host1" };
+    String[] racks = {"/rack1"};
+    String[] hosts = {"host1"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    MiniDFSCluster cluster;
     DistributedFileSystem dfs;
     cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build();
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+            .racks(racks).build();
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
 
     // create files
     final String testFile = new String("/testfile");
     final Path path = new Path(testFile);
-    DFSTestUtil.createFile(dfs, path, fileSize, REPL_FACTOR, 1000L);
-    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
-    try {
-      // make sure datanode that has replica is fine before decommission
-      String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(fsckOut.contains("(LIVE)"));
-
-      // decommission datanode
-      ExtendedBlock eb = DFSTestUtil.getFirstBlock(dfs, path);
-      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
-      BlockManager bm = fsn.getBlockManager();
-      BlockCollection bc = null;
-      try {
-        fsn.writeLock();
-        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = fsn.getBlockCollection(bi);
-      } finally {
-        fsn.writeUnlock();
-      }
-      DatanodeDescriptor dn = bc.getBlocks()[0]
-          .getDatanode(0);
-      bm.getDatanodeManager().getDecomManager().startDecommission(dn);
-      String dnName = dn.getXferAddr();
-
-      // check the replica status while decommissioning
-      fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains("(DECOMMISSIONING)"));
-
-      // Start 2nd Datanode and wait for decommission to start
-      cluster.startDataNodes(conf, 1, true, null, null, null);
-      DatanodeInfo datanodeInfo = null;
-      do {
-        Thread.sleep(2000);
-        for (DatanodeInfo info : dfs.getDataNodeStats()) {
-          if (dnName.equals(info.getXferAddr())) {
-            datanodeInfo = info;
-          }
-        }
-        if (!checkDecommissionInProgress && datanodeInfo != null
-            && datanodeInfo.isDecommissionInProgress()) {
-          checkDecommissionInProgress = true;
-        }
-      } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
+    DFSTestUtil.waitReplication(dfs, path, replFactor);
 
-      // check the replica status after decommission is done
-      fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
-      assertTrue(fsckOut.contains("(DECOMMISSIONED)"));
+    // make sure datanode that has replica is fine before decommission
+    String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(fsckOut.contains("(LIVE)"));
+
+    // decommission datanode
+    ExtendedBlock eb = DFSTestUtil.getFirstBlock(dfs, path);
+    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+    BlockCollection bc = null;
+    try {
+      fsn.writeLock();
+      BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
+      bc = fsn.getBlockCollection(bi);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      fsn.writeUnlock();
     }
+    DatanodeDescriptor dn = bc.getBlocks()[0]
+        .getDatanode(0);
+    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
+    String dnName = dn.getXferAddr();
+
+    // check the replica status while decommissioning
+    fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains("(DECOMMISSIONING)"));
+
+    // Start 2nd Datanode and wait for decommission to start
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+    DatanodeInfo datanodeInfo = null;
+    do {
+      Thread.sleep(2000);
+      for (DatanodeInfo info : dfs.getDataNodeStats()) {
+        if (dnName.equals(info.getXferAddr())) {
+          datanodeInfo = info;
+        }
+      }
+      if (!checkDecommissionInProgress && datanodeInfo != null
+          && datanodeInfo.isDecommissionInProgress()) {
+        checkDecommissionInProgress = true;
+      }
+    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+
+    // check the replica status after decommission is done
+    fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks",
+        "-replicaDetails");
+    assertTrue(fsckOut.contains("(DECOMMISSIONED)"));
   }
 
-  /** Test if fsck can return -1 in case of failure
+  /** Test if fsck can return -1 in case of failure.
    * 
    * @throws Exception
    */
   @Test
   public void testFsckError() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      // bring up a one-node cluster
-      Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      String fileName = "/test.txt";
-      Path filePath = new Path(fileName);
-      FileSystem fs = cluster.getFileSystem();
-      
-      // create a one-block file
-      DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
-      DFSTestUtil.waitReplication(fs, filePath, (short)1);
-      
-      // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
-          (fileName, true);
-      final BlockInfo[] blocks = node.getBlocks();
-      assertEquals(blocks.length, 1);
-      blocks[0].setNumBytes(-1L);  // set the block length to be negative
-      
-      // run fsck and expect a failure with -1 as the error code
-      String outStr = runFsck(conf, -1, true, fileName);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
-      
-      // clean up file system
-      fs.delete(filePath, true);
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
+    // bring up a one-node cluster
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    String fileName = "/test.txt";
+    Path filePath = new Path(fileName);
+    FileSystem fs = cluster.getFileSystem();
+
+    // create a one-block file
+    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
+    DFSTestUtil.waitReplication(fs, filePath, (short)1);
+
+    // intentionally corrupt NN data structure
+    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode(
+        fileName, true);
+    final BlockInfo[] blocks = node.getBlocks();
+    assertEquals(blocks.length, 1);
+    blocks[0].setNumBytes(-1L);  // set the block length to be negative
+
+    // run fsck and expect a failure with -1 as the error code
+    String outStr = runFsck(conf, -1, true, fileName);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
+
+    // clean up file system
+    fs.delete(filePath, true);
   }
   
-  /** check if option -list-corruptfiles of fsck command works properly */
+  /** check if option -list-corruptfileblocks of fsck command works properly. */
   @Test
   public void testFsckListCorruptFilesBlocks() throws Exception {
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      cluster.waitActive();
-      fs = cluster.getFileSystem();
-      DFSTestUtil util = new DFSTestUtil.Builder().
-          setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
-          setMaxSize(1024).build();
-      util.createFiles(fs, "/corruptData", (short) 1);
-      util.waitReplication(fs, "/corruptData", (short) 1);
-
-      // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
-      String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
-      System.out.println("1. good fsck out: " + outStr);
-      assertTrue(outStr.contains("has 0 CORRUPT files"));
-      // delete the blocks
-      final String bpid = cluster.getNamesystem().getBlockPoolId();
-      for (int i=0; i<4; i++) {
-        for (int j=0; j<=1; j++) {
-          File storageDir = cluster.getInstanceStorageDir(i, j);
-          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-          List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
-              data_dir);
-          if (metadataFiles == null)
-            continue;
-          for (File metadataFile : metadataFiles) {
-            File blockFile = Block.metaToBlockFile(metadataFile);
-            assertTrue("Cannot remove file.", blockFile.delete());
-            assertTrue("Cannot remove file.", metadataFile.delete());
-          }
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    DFSTestUtil util = new DFSTestUtil.Builder().
+        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
+        setMaxSize(1024).build();
+    util.createFiles(fs, "/corruptData", (short) 1);
+    util.waitReplication(fs, "/corruptData", (short) 1);
+
+    String outStr = runFsck(conf, 0, false, "/corruptData",
+        "-list-corruptfileblocks");
+    System.out.println("1. good fsck out: " + outStr);
+    assertTrue(outStr.contains("has 0 CORRUPT files"));
+    // delete the blocks
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    for (int i=0; i<4; i++) {
+      for (int j=0; j<=1; j++) {
+        File storageDir = cluster.getInstanceStorageDir(i, j);
+        File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+            dataDir);
+        if (metadataFiles == null) {
+          continue;
+        }
+        for (File metadataFile : metadataFiles) {
+          File blockFile = Block.metaToBlockFile(metadataFile);
+          assertTrue("Cannot remove file.", blockFile.delete());
+          assertTrue("Cannot remove file.", metadataFile.delete());
         }
       }
+    }
 
-      // wait for the namenode to see the corruption
-      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
-      CorruptFileBlocks corruptFileBlocks = namenode
+    // wait for the namenode to see the corruption
+    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
+    CorruptFileBlocks corruptFileBlocks = namenode
+        .listCorruptFileBlocks("/corruptData", null);
+    int numCorrupt = corruptFileBlocks.getFiles().length;
+    while (numCorrupt == 0) {
+      Thread.sleep(1000);
+      corruptFileBlocks = namenode
           .listCorruptFileBlocks("/corruptData", null);
-      int numCorrupt = corruptFileBlocks.getFiles().length;
-      while (numCorrupt == 0) {
-        Thread.sleep(1000);
-        corruptFileBlocks = namenode
-            .listCorruptFileBlocks("/corruptData", null);
-        numCorrupt = corruptFileBlocks.getFiles().length;
-      }
-      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
-      System.out.println("2. bad fsck out: " + outStr);
-      assertTrue(outStr.contains("has 3 CORRUPT files"));
-
-      // Do a listing on a dir which doesn't have any corrupt blocks and validate
-      util.createFiles(fs, "/goodData");
-      outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
-      System.out.println("3. good fsck out: " + outStr);
-      assertTrue(outStr.contains("has 0 CORRUPT files"));
-      util.cleanup(fs,"/corruptData");
-      util.cleanup(fs, "/goodData");
-    } finally {
-      if (cluster != null) {cluster.shutdown();}
+      numCorrupt = corruptFileBlocks.getFiles().length;
     }
+    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
+    System.out.println("2. bad fsck out: " + outStr);
+    assertTrue(outStr.contains("has 3 CORRUPT files"));
+
+    // Do a listing on a dir which doesn't have any corrupt blocks and validate
+    util.createFiles(fs, "/goodData");
+    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
+    System.out.println("3. good fsck out: " + outStr);
+    assertTrue(outStr.contains("has 0 CORRUPT files"));
+    util.cleanup(fs, "/corruptData");
+    util.cleanup(fs, "/goodData");
   }
   
   /**
@@ -1109,193 +1054,163 @@ public class TestFsck {
    */
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      // bring up a one-node cluster
-      Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      String fileName = "/test.txt";
-      Path filePath = new Path(fileName);
-      FileSystem fs = cluster.getFileSystem();
-
-      // create a one-block file
-      DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
-      DFSTestUtil.waitReplication(fs, filePath, (short) 1);
-
-      // passing illegal option
-      String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
-      System.out.println(outStr);
-      assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // passing multiple paths are arguments
-      outStr = runFsck(conf, -1, true, "/", fileName);
-      System.out.println(outStr);
-      assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      // clean up file system
-      fs.delete(filePath, true);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // bring up a one-node cluster
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    String fileName = "/test.txt";
+    Path filePath = new Path(fileName);
+    FileSystem fs = cluster.getFileSystem();
+
+    // create a one-block file
+    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
+    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
+
+    // passing illegal option
+    String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
+    System.out.println(outStr);
+    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    // passing multiple paths as arguments
+    outStr = runFsck(conf, -1, true, "/", fileName);
+    System.out.println(outStr);
+    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    // clean up file system
+    fs.delete(filePath, true);
   }
   
   /**
-   * Tests that the # of missing block replicas and expected replicas is correct
+   * Tests that the # of missing block replicas and expected replicas is
+   * correct.
    * @throws IOException
    */
   @Test
   public void testFsckMissingReplicas() throws IOException {
     // Desired replication factor
-    // Set this higher than NUM_REPLICAS so it's under-replicated
-    final short REPL_FACTOR = 2;
+    // Set this higher than numReplicas so it's under-replicated
+    final short replFactor = 2;
     // Number of replicas to actually start
-    final short NUM_REPLICAS = 1;
+    final short numReplicas = 1;
     // Number of blocks to write
-    final short NUM_BLOCKS = 3;
+    final short numBlocks = 3;
     // Set a small-ish blocksize
     final long blockSize = 512;
     
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     
-    try {
-      // Startup a minicluster
-      cluster = 
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
-      
-      // Create a file that will be intentionally under-replicated
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      long fileLen = blockSize * NUM_BLOCKS;
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-      
-      // Create an under-replicated file
-      NameNode namenode = cluster.getNameNode();
-      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
-          .getDatanodeManager().getNetworkTopology();
-      Map<String,String[]> pmap = new HashMap<String, String[]>();
-      Writer result = new StringWriter();
-      PrintWriter out = new PrintWriter(result, true);
-      InetAddress remoteAddress = InetAddress.getLocalHost();
-      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_REPLICAS, remoteAddress);
-      
-      // Run the fsck and check the Result
-      final HdfsFileStatus file = 
-          namenode.getRpcServer().getFileInfo(pathString);
-      assertNotNull(file);
-      Result replRes = new ReplicationResult(conf);
-      Result ecRes = new ErasureCodingResult(conf);
-      fsck.check(pathString, file, replRes, ecRes);
-      // Also print the output from the fsck, for ex post facto sanity checks
-      System.out.println(result.toString());
-      assertEquals(replRes.missingReplicas,
-          (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
-      assertEquals(replRes.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
-    } finally {
-      if(dfs != null) {
-        dfs.close();
-      }
-      if(cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // Startup a minicluster
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
+
+    // Create a file that will be intentionally under-replicated
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    long fileLen = blockSize * numBlocks;
+    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
+
+    // Create an under-replicated file
+    NameNode namenode = cluster.getNameNode();
+    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+        .getDatanodeManager().getNetworkTopology();
+    Map<String, String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        numReplicas, remoteAddress);
+
+    // Run the fsck and check the Result
+    final HdfsFileStatus file =
+        namenode.getRpcServer().getFileInfo(pathString);
+    assertNotNull(file);
+    Result replRes = new ReplicationResult(conf);
+    Result ecRes = new ErasureCodingResult(conf);
+    fsck.check(pathString, file, replRes, ecRes);
+    // Also print the output from the fsck, for ex post facto sanity checks
+    System.out.println(result.toString());
+    assertEquals(replRes.missingReplicas,
+        (numBlocks*replFactor) - (numBlocks*numReplicas));
+    assertEquals(replRes.numExpectedReplicas, numBlocks*replFactor);
   }
   
   /**
-   * Tests that the # of misreplaced replicas is correct
+   * Tests that the # of misreplaced replicas is correct.
    * @throws IOException
    */
   @Test
   public void testFsckMisPlacedReplicas() throws IOException {
     // Desired replication factor
-    final short REPL_FACTOR = 2;
+    final short replFactor = 2;
     // Number of replicas to actually start
-    short NUM_DN = 2;
+    short numDn = 2;
     // Number of blocks to write
-    final short NUM_BLOCKS = 3;
+    final short numBlocks = 3;
     // Set a small-ish blocksize
     final long blockSize = 512;
     
-    String [] racks = {"/rack1", "/rack1"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack1"};
+    String[] hosts = {"host1", "host2"};
     
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     
-    try {
-      // Startup a minicluster
-      cluster = 
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
-          .racks(racks).build();
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
-      
-      // Create a file that will be intentionally under-replicated
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      long fileLen = blockSize * NUM_BLOCKS;
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-      
-      // Create an under-replicated file
-      NameNode namenode = cluster.getNameNode();
-      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
-          .getDatanodeManager().getNetworkTopology();
-      // Add a new node on different rack, so previous blocks' replicas 
-      // are considered to be misplaced
-      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
-      NUM_DN++;
-      
-      Map<String,String[]> pmap = new HashMap<String, String[]>();
-      Writer result = new StringWriter();
-      PrintWriter out = new PrintWriter(result, true);
-      InetAddress remoteAddress = InetAddress.getLocalHost();
-      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_DN, remoteAddress);
-      
-      // Run the fsck and check the Result
-      final HdfsFileStatus file = 
-          namenode.getRpcServer().getFileInfo(pathString);
-      assertNotNull(file);
-      Result replRes = new ReplicationResult(conf);
-      Result ecRes = new ErasureCodingResult(conf);
-      fsck.check(pathString, file, replRes, ecRes);
-      // check misReplicatedBlock number.
-      assertEquals(replRes.numMisReplicatedBlocks, NUM_BLOCKS);
-    } finally {
-      if(dfs != null) {
-        dfs.close();
-      }
-      if(cluster != null) {
-        cluster.shutdown();
-      }
-    }
+    // Startup a minicluster
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+        .racks(racks).build();
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
+
+    // Create a file that will be intentionally under-replicated
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    long fileLen = blockSize * numBlocks;
+    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
+
+    // Create an under-replicated file
+    NameNode namenode = cluster.getNameNode();
+    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+        .getDatanodeManager().getNetworkTopology();
+    // Add a new node on different rack, so previous blocks' replicas
+    // are considered to be misplaced
+    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
+    numDn++;
+
+    Map<String, String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        numDn, remoteAddress);
+
+    // Run the fsck and check the Result
+    final HdfsFileStatus file =
+        namenode.getRpcServer().getFileInfo(pathString);
+    assertNotNull(file);
+    Result replRes = new ReplicationResult(conf);
+    Result ecRes = new ErasureCodingResult(conf);
+    fsck.check(pathString, file, replRes, ecRes);
+    // check misReplicatedBlock number.
+    assertEquals(replRes.numMisReplicatedBlocks, numBlocks);
   }
 
-  /** Test fsck with FileNotFound */
+  /** Test fsck with FileNotFound. */
   @Test
   public void testFsckFileNotFound() throws Exception {
 
     // Number of replicas to actually start
-    final short NUM_REPLICAS = 1;
+    final short numReplicas = 1;
 
-    Configuration conf = new Configuration();
     NameNode namenode = mock(NameNode.class);
     NetworkTopology nettop = mock(NetworkTopology.class);
-    Map<String,String[]> pmap = new HashMap<>();
+    Map<String, String[]> pmap = new HashMap<>();
     Writer result = new StringWriter();
     PrintWriter out = new PrintWriter(result, true);
     InetAddress remoteAddress = InetAddress.getLocalHost();
@@ -1313,7 +1228,7 @@ public class TestFsck {
     when(blockManager.getDatanodeManager()).thenReturn(dnManager);
 
     NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
-        NUM_REPLICAS, remoteAddress);
+        numReplicas, remoteAddress);
 
     String pathString = "/tmp/testFile";
 
@@ -1326,8 +1241,8 @@ public class TestFsck {
     FsPermission perms = FsPermission.getDefault();
     String owner = "foo";
     String group = "bar";
-    byte [] symlink = null;
-    byte [] path = DFSUtil.string2Bytes(pathString);
+    byte[] symlink = null;
+    byte[] path = DFSUtil.string2Bytes(pathString);
     long fileId = 312321L;
     int numChildren = 1;
     byte storagePolicy = 0;
@@ -1346,95 +1261,82 @@ public class TestFsck {
     assertTrue(replRes.isHealthy());
   }
 
-  /** Test fsck with symlinks in the filesystem */
+  /** Test fsck with symlinks in the filesystem. */
   @Test
   public void testFsckSymlink() throws Exception {
     final DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
-    final Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
-    MiniDFSCluster cluster = null;
     FileSystem fs = null;
-    try {
-      final long precision = 1L;
-      conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
-      conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
-      fs = cluster.getFileSystem();
-      final String fileName = "/srcdat";
-      util.createFiles(fs, fileName);
-      final FileContext fc = FileContext.getFileContext(
-          cluster.getConfiguration(0));
-      final Path file = new Path(fileName);
-      final Path symlink = new Path("/srcdat-symlink");
-      fc.createSymlink(file, symlink, false);
-      util.waitReplication(fs, fileName, (short)3);
-      long aTime = fc.getFileStatus(symlink).getAccessTime();
-      Thread.sleep(precision);
-      setupAuditLogs();
-      String outStr = runFsck(conf, 0, true, "/");
-      verifyAuditLogs();
-      assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-      assertTrue(outStr.contains("Total symlinks:\t\t1"));
-      util.cleanup(fs, fileName);
-    } finally {
-      if (fs != null) {try{fs.close();} catch(Exception e){}}
-      if (cluster != null) { cluster.shutdown(); }
-    }
+    final long precision = 1L;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
+        precision);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    final String fileName = "/srcdat";
+    util.createFiles(fs, fileName);
+    final FileContext fc = FileContext.getFileContext(
+        cluster.getConfiguration(0));
+    final Path file = new Path(fileName);
+    final Path symlink = new Path("/srcdat-symlink");
+    fc.createSymlink(file, symlink, false);
+    util.waitReplication(fs, fileName, (short)3);
+    long aTime = fc.getFileStatus(symlink).getAccessTime();
+    Thread.sleep(precision);
+    setupAuditLogs();
+    String outStr = runFsck(conf, 0, true, "/");
+    verifyAuditLogs();
+    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(outStr.contains("Total symlinks:\t\t1"));
+    util.cleanup(fs, fileName);
   }
 
   /**
-   * Test for including the snapshot files in fsck report
+   * Test for including the snapshot files in fsck report.
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .build();
-    try {
-      String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
-          "-files");
-      assertTrue(runFsck.contains("HEALTHY"));
-      final String fileName = "/srcdat";
-      DistributedFileSystem hdfs = cluster.getFileSystem();
-      Path file1 = new Path(fileName);
-      DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
-      hdfs.allowSnapshot(new Path("/"));
-      hdfs.createSnapshot(new Path("/"), "mySnapShot");
-      runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
-      assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
-      runFsck = runFsck(conf, 0, true, "/", "-files");
-      assertFalse(runFsck.contains("mySnapShot"));
-    } finally {
-      cluster.shutdown();
-    }
+    String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
+        "-files");
+    assertTrue(runFsck.contains("HEALTHY"));
+    final String fileName = "/srcdat";
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    Path file1 = new Path(fileName);
+    DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
+    hdfs.allowSnapshot(new Path("/"));
+    hdfs.createSnapshot(new Path("/"), "mySnapShot");
+    runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
+    assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
+    runFsck = runFsck(conf, 0, true, "/", "-files");
+    assertFalse(runFsck.contains("mySnapShot"));
   }
 
   /**
-   * Test for blockIdCK
+   * Test for blockIdCK.
    */
 
   @Test
   public void testBlockIdCK() throws Exception {
 
-    final short REPL_FACTOR = 2;
-    short NUM_DN = 2;
+    final short replFactor = 2;
+    short numDn = 2;
     final long blockSize = 512;
 
-    String [] racks = {"/rack1", "/rack2"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack2"};
+    String[] hosts = {"host1", "host2"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
     cluster =
-      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+      new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
         .racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1443,12 +1345,12 @@ public class TestFsck {
     assertNotNull("Failed to get FileSystem", dfs);
 
     DFSTestUtil util = new DFSTestUtil.Builder().
-      setName(getClass().getSimpleName()).setNumFiles(1).build();
+        setName(getClass().getSimpleName()).setNumFiles(1).build();
     //create files
     final String pathString = new String("/testfile");
     final Path path = new Path(pathString);
-    util.createFile(dfs, path, 1024, REPL_FACTOR , 1000L);
-    util.waitReplication(dfs, path, REPL_FACTOR);
+    util.createFile(dfs, path, 1024, replFactor, 1000L);
+    util.waitReplication(dfs, path, replFactor);
     StringBuilder sb = new StringBuilder();
     for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
       sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
@@ -1456,46 +1358,40 @@ public class TestFsck {
     String[] bIds = sb.toString().split(" ");
 
     //run fsck
-    try {
-      //illegal input test
-      String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
-          "not_a_block_id");
-      assertTrue(runFsckResult.contains("Incorrect blockId format:"));
-
-      //general test
-      runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
-      assertTrue(runFsckResult.contains(bIds[0]));
-      assertTrue(runFsckResult.contains(bIds[1]));
-      assertTrue(runFsckResult.contains(
-          "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
-      assertTrue(runFsckResult.contains(
-          "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
-    } finally {
-      cluster.shutdown();
-    }
+    //illegal input test
+    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
+        "not_a_block_id");
+    assertTrue(runFsckResult.contains("Incorrect blockId format:"));
+
+    //general test
+    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
+    assertTrue(runFsckResult.contains(bIds[0]));
+    assertTrue(runFsckResult.contains(bIds[1]));
+    assertTrue(runFsckResult.contains(
+        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
+    assertTrue(runFsckResult.contains(
+        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
   }
 
   /**
-   * Test for blockIdCK with datanode decommission
+   * Test for blockIdCK with datanode decommission.
    */
   @Test
   public void testBlockIdCKDecommission() throws Exception {
 
-    final short REPL_FACTOR = 1;
-    short NUM_DN = 2;
+    final short replFactor = 1;
+    short numDn = 2;
     final long blockSize = 512;
     boolean checkDecommissionInProgress = false;
-    String [] racks = {"/rack1", "/rack2"};
-    String [] hosts = {"host1", "host2"};
+    String[] racks = {"/rack1", "/rack2"};
+    String[] hosts = {"host1", "host2"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
-    MiniDFSCluster cluster;
-    DistributedFileSystem dfs ;
+    DistributedFileSystem dfs;
     cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
             .racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1508,137 +1404,124 @@ public class TestFsck {
     //create files
     final String pathString = new String("/testfile");
     final Path path = new Path(pathString);
-    util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
-    util.waitReplication(dfs, path, REPL_FACTOR);
+    util.createFile(dfs, path, 1024, replFactor, 1000L);
+    util.waitReplication(dfs, path, replFactor);
     StringBuilder sb = new StringBuilder();
     for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
       sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
     }
     String[] bIds = sb.toString().split(" ");
-    try {
-      //make sure datanode that has replica is fine before decommission
-      String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      //decommission datanode
-      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
-      BlockManager bm = fsn.getBlockManager();
-      ExtendedBlock eb = util.getFirstBlock(dfs, path);
-      BlockCollection bc = null;
-      try {
-        fsn.writeLock();
-        BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
-        bc = fsn.getBlockCollection(bi);
-      } finally {
-        fsn.writeUnlock();
-      }
-      DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
-      bm.getDatanodeManager().getDecomManager().startDecommission(dn);
-      String dnName = dn.getXferAddr();
-
-      //wait for decommission start
-      DatanodeInfo datanodeInfo = null;
-      int count = 0;
-      do {
-        Thread.sleep(2000);
-        for (DatanodeInfo info : dfs.getDataNodeStats()) {
-          if (dnName.equals(info.getXferAddr())) {
-            datanodeInfo = info;
-          }
-        }
-         //check decommissioning only once
-        if(!checkDecommissionInProgress && datanodeInfo != null
-            && datanodeInfo.isDecommissionInProgress()) {
-          String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
-          assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
-          checkDecommissionInProgress =  true;
-        }
-      } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
 
-      //check decommissioned
-      String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
-      assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
+    //make sure datanode that has replica is fine before decommission
+    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+    //decommission datanode
+    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+    ExtendedBlock eb = util.getFirstBlock(dfs, path);
+    BlockCollection bc = null;
+    try {
+      fsn.writeLock();
+      BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
+      bc = fsn.getBlockCollection(bi);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      fsn.writeUnlock();
     }
+    DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
+    bm.getDatanodeManager().getDecomManager().startDecommission(dn);
+    String dnName = dn.getXferAddr();
+
+    //wait for decommission start
+    DatanodeInfo datanodeInfo = null;
+    int count = 0;
+    do {
+      Thread.sleep(2000);
+      for (DatanodeInfo info : dfs.getDataNodeStats()) {
+        if (dnName.equals(info.getXferAddr())) {
+          datanodeInfo = info;
+        }
+      }
+       //check decommissioning only once
+      if(!checkDecommissionInProgress && datanodeInfo != null
+          && datanodeInfo.isDecommissionInProgress()) {
+        String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
+        assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
+        checkDecommissionInProgress =  true;
+      }
+    } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
+
+    //check decommissioned
+    String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
+    assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
   }
 
   /**
-   * Test for blockIdCK with block corruption
+   * Test for blockIdCK with block corruption.
    */
   @Test
   public void testBlockIdCKCorruption() throws Exception {
-    short NUM_DN = 1;
+    short numDn = 1;
     final long blockSize = 512;
     Random random = new Random();
     ExtendedBlock block;
     short repFactor = 1;
-    String [] racks = {"/rack1"};
-    String [] hosts = {"host1"};
+    String[] racks = {"/rack1"};
+    String[] hosts = {"host1"};
 
-    Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
-    try {
-      cluster =
-          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
-              .racks(racks).build();
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
+            .racks(racks).build();
 
-      assertNotNull("Failed Cluster Creation", cluster);
-      cluster.waitClusterUp();
-      dfs = cluster.getFileSystem();
-      assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    dfs = cluster.getFileSystem();
+    assertNotNull("Failed to get FileSystem", dfs);
 
-      DFSTestUtil util = new DFSTestUtil.Builder().
+    DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
-      //create files
-      final String pathString = new String("/testfile");
-      final Path path = new Path(pathString);
-      util.createFile(dfs, path, 1024, repFactor, 1000L);
-      util.waitReplication(dfs, path, repFactor);
-      StringBuilder sb = new StringBuilder();
-      for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
-        sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
-      }
-      String[] bIds = sb.toString().split(" ");
-
-      //make sure block is healthy before we corrupt it
-      String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
-
-      // corrupt replicas
-      block = DFSTestUtil.getFirstBlock(dfs, path);
-      File blockFile = cluster.getBlockFile(0, block);
-      if (blockFile != null && blockFile.exists()) {
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int) channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-      }
+    //create files
+    final String pathString = new String("/testfile");
+    final Path path = new Path(pathString);
+    util.createFile(dfs, path, 1024, repFactor, 1000L);
+    util.waitReplication(dfs, path, repFactor);
+    StringBuilder sb = new StringBuilder();
+    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
+      sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
+    }
+    String[] bIds = sb.toString().split(" ");
 
-      util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);
+    //make sure block is healthy before we corrupt it
+    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
 
-      outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
-      System.out.println(outStr);
-      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+    // corrupt replicas
+    block = DFSTestUtil.getFirstBlock(dfs, path);
+    File blockFile = cluster.getBlockFile(0, block);
+    if (blockFile != null && blockFile.exists()) {
+      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+      FileChannel channel = raFile.getChannel();
+      String badString = "BADBAD";
+      int rand = random.nextInt((int) channel.size()/2);
+      raFile.seek(rand);
+      raFile.write(badString.getBytes());
+      raFile.close();
     }
+
+    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);
+
+    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
+    System.out.println(outStr);
+    assertTrue(outStr.cont

<TRUNCATED>



[38/51] [abbrv] hadoop git commit: HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn Sharp and Rushabh S Shah.

Posted by ae...@apache.org.
HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn Sharp and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85cd06f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85cd06f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85cd06f6

Branch: refs/heads/HDFS-7240
Commit: 85cd06f6636f295ad1f3bf2a90063f4714c9cca7
Parents: 6476934
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Oct 12 15:11:42 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed Oct 12 15:11:42 2016 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/ipc/ExternalCall.java     |   9 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |   6 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../hdfs/server/namenode/FSNamesystem.java      |  15 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  12 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   6 +-
 .../web/resources/NamenodeWebHdfsMethods.java   | 150 +++++++++++--------
 .../src/main/resources/hdfs-default.xml         |   7 +
 .../server/namenode/TestNamenodeRetryCache.java |  25 +++-
 .../web/resources/TestWebHdfsDataLocality.java  |  25 +++-
 10 files changed, 160 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
index 9b4cbcf..5566136 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.ipc.Server.Call;
@@ -37,14 +38,10 @@ public abstract class ExternalCall<T> extends Call {
 
   public abstract UserGroupInformation getRemoteUser();
 
-  public final T get() throws IOException, InterruptedException {
+  public final T get() throws InterruptedException, ExecutionException {
     waitForCompletion();
     if (error != null) {
-      if (error instanceof IOException) {
-        throw (IOException)error;
-      } else {
-        throw new IOException(error);
-      }
+      throw new ExecutionException(error);
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 92d9183..72b603a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -72,6 +72,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -989,8 +990,9 @@ public class TestRPC extends TestRpcBase {
       try {
         exceptionCall.get();
         fail("didn't throw");
-      } catch (IOException ioe) {
-        assertEquals(expectedIOE.getMessage(), ioe.getMessage());
+      } catch (ExecutionException ee) {
+        assertTrue((ee.getCause()) instanceof IOException);
+        assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
       }
     } finally {
       server.stop();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 18209ae..10c0ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,6 +70,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.webhdfs.ugi.expire.after.access";
   public static final int     DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
       10*60*1000; //10 minutes
+  public static final String DFS_WEBHDFS_USE_IPC_CALLQ =
+      "dfs.webhdfs.use.ipc.callq";
+  public static final boolean DFS_WEBHDFS_USE_IPC_CALLQ_DEFAULT = true;
 
   // HA related configuration
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b9b02ef..8c59186 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -242,7 +242,6 @@ import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -338,7 +337,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private void logAuditEvent(boolean succeeded, String cmd, String src,
       String dst, HdfsFileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
-      logAuditEvent(succeeded, getRemoteUser(), getRemoteIp(),
+      logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
                     cmd, src, dst, stat);
     }
   }
@@ -5262,17 +5261,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * RPC call context even if the client exits.
    */
   boolean isExternalInvocation() {
-    return Server.isRpcInvocation() || NamenodeWebHdfsMethods.isWebHdfsInvocation();
+    return Server.isRpcInvocation();
   }
 
-  private static InetAddress getRemoteIp() {
-    InetAddress ip = Server.getRemoteIp();
-    if (ip != null) {
-      return ip;
-    }
-    return NamenodeWebHdfsMethods.getRemoteIp();
-  }
-  
   // optimize ugi lookup for RPC operations to avoid a trip through
   // UGI.getCurrentUser which is synch'ed
   private static UserGroupInformation getRemoteUser() throws IOException {
@@ -6918,7 +6909,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           sb.append(trackingId);
         }
         sb.append("\t").append("proto=");
-        sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc");
+        sb.append(Server.getProtocol());
         if (isCallerContextEnabled &&
             callerContext != null &&
             callerContext.isContextValid()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index ae7a937..afedbb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -64,7 +64,9 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.ipc.ExternalCall;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -407,7 +409,15 @@ public class NameNode extends ReconfigurableBase implements
   public NamenodeProtocols getRpcServer() {
     return rpcServer;
   }
-  
+
+  public void queueExternalCall(ExternalCall<?> extCall)
+      throws IOException, InterruptedException {
+    if (rpcServer == null) {
+      throw new RetriableException("Namenode is in startup mode");
+    }
+    rpcServer.getClientRpcServer().queueCall(extCall);
+  }
+
   public static void initMetrics(Configuration conf, NamenodeRole role) {
     metrics = NameNodeMetrics.create(conf, role);
   }
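
Taken together with the ExternalCall.get() change above, queueExternalCall() gives non-RPC callers a way to push work onto the NameNode's RPC call queue and wait for the result. A minimal sketch of that pattern follows; it is not part of the patch, it assumes getRemoteUser() is the only abstract method a subclass must supply, and the class/method names are made up for illustration:

  import java.io.IOException;
  import java.security.PrivilegedExceptionAction;
  import java.util.concurrent.ExecutionException;

  import org.apache.hadoop.hdfs.server.namenode.NameNode;
  import org.apache.hadoop.ipc.ExternalCall;
  import org.apache.hadoop.security.UserGroupInformation;

  /** Illustrative only; class and method names are hypothetical. */
  public final class ExternalCallSketch {
    private ExternalCallSketch() {}

    /** Queue an action on the NameNode's RPC call queue and wait for it. */
    static <T> T runViaCallQueue(final NameNode namenode,
        final UserGroupInformation ugi,
        final PrivilegedExceptionAction<T> action)
        throws IOException, InterruptedException {
      ExternalCall<T> call = new ExternalCall<T>(action) {
        @Override
        public UserGroupInformation getRemoteUser() {
          return ugi;                     // identity the handler runs the action as
        }
      };
      namenode.queueExternalCall(call);   // enqueued alongside normal RPC calls
      try {
        return call.get();                // blocks until a handler thread has run it
      } catch (ExecutionException ee) {
        Throwable t = ee.getCause();      // unwrap the action's original failure
        if (t instanceof RuntimeException) {
          throw (RuntimeException) t;
        } else if (t instanceof IOException) {
          throw (IOException) t;
        }
        throw new IOException(t);
      }
    }
  }

The real version is doAsExternalCall() in NamenodeWebHdfsMethods below, which additionally overrides getProtocol() and getHostAddress() so that audit logging and datanode selection still see the webhdfs client rather than the web server thread.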

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 57f7cb1..a97a307 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -139,7 +139,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -1686,10 +1685,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   private static String getClientMachine() {
-    String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
-    if (clientMachine == null) { //not a web client
-      clientMachine = Server.getRemoteAddress();
-    }
+    String clientMachine = Server.getRemoteAddress();
     if (clientMachine == null) { //not a RPC client
       clientMachine = "";
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 3ab0c67..4887e35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -25,10 +25,13 @@ import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.UnknownHostException;
+import java.security.Principal;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
@@ -60,6 +63,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -81,8 +85,8 @@ import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.ExternalCall;
 import org.apache.hadoop.ipc.RetriableException;
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -103,39 +107,39 @@ public class NamenodeWebHdfsMethods {
   public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
 
   private static final UriFsPathParam ROOT = new UriFsPathParam("");
-  
-  private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>(); 
-
-  /** @return the remote client address. */
-  public static String getRemoteAddress() {
-    return REMOTE_ADDRESS.get();
-  }
-
-  public static InetAddress getRemoteIp() {
-    try {
-      return InetAddress.getByName(getRemoteAddress());
-    } catch (Exception e) {
-      return null;
-    }
-  }
 
-  /**
-   * Returns true if a WebHdfs request is in progress.  Akin to
-   * {@link Server#isRpcInvocation()}.
-   */
-  public static boolean isWebHdfsInvocation() {
-    return getRemoteAddress() != null;
-  }
+  private volatile Boolean useIpcCallq;
+  private String scheme;
+  private Principal userPrincipal;
+  private String remoteAddr;
 
   private @Context ServletContext context;
-  private @Context HttpServletRequest request;
   private @Context HttpServletResponse response;
 
+  public NamenodeWebHdfsMethods(@Context HttpServletRequest request) {
+    // the request object is a proxy to thread-locals so we have to extract
+    // what we want from it since the external call will be processed in a
+    // different thread.
+    scheme = request.getScheme();
+    userPrincipal = request.getUserPrincipal();
+    // get the remote address, if coming in via a trusted proxy server then
+    // the address will be that of the proxied client
+    remoteAddr = JspHelper.getRemoteAddr(request);
+  }
+
   private void init(final UserGroupInformation ugi,
       final DelegationParam delegation,
       final UserParam username, final DoAsParam doAsUser,
       final UriFsPathParam path, final HttpOpParam<?> op,
       final Param<?, ?>... parameters) {
+    if (useIpcCallq == null) {
+      Configuration conf =
+          (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
+      useIpcCallq = conf.getBoolean(
+          DFSConfigKeys.DFS_WEBHDFS_USE_IPC_CALLQ,
+          DFSConfigKeys.DFS_WEBHDFS_USE_IPC_CALLQ_DEFAULT);
+    }
+
     if (LOG.isTraceEnabled()) {
       LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
           + ", ugi=" + ugi + ", " + username + ", " + doAsUser
@@ -144,16 +148,8 @@ public class NamenodeWebHdfsMethods {
 
     //clear content type
     response.setContentType(null);
-    
-    // set the remote address, if coming in via a trust proxy server then
-    // the address with be that of the proxied client
-    REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request));
   }
 
-  private void reset() {
-    REMOTE_ADDRESS.set(null);
-  }
-  
   private static NamenodeProtocols getRPCServer(NameNode namenode)
       throws IOException {
      final NamenodeProtocols np = namenode.getRpcServer();
@@ -162,11 +158,63 @@ public class NamenodeWebHdfsMethods {
      }
      return np;
   }
-  
+
+  private <T> T doAs(final UserGroupInformation ugi,
+      final PrivilegedExceptionAction<T> action)
+          throws IOException, InterruptedException {
+    return useIpcCallq ? doAsExternalCall(ugi, action) : ugi.doAs(action);
+  }
+
+  private <T> T doAsExternalCall(final UserGroupInformation ugi,
+      final PrivilegedExceptionAction<T> action)
+          throws IOException, InterruptedException {
+    // set the remote address, if coming in via a trusted proxy server then
+    // the address will be that of the proxied client
+    ExternalCall<T> call = new ExternalCall<T>(action){
+      @Override
+      public UserGroupInformation getRemoteUser() {
+        return ugi;
+      }
+      @Override
+      public String getProtocol() {
+        return "webhdfs";
+      }
+      @Override
+      public String getHostAddress() {
+        return remoteAddr;
+      }
+      @Override
+      public InetAddress getHostInetAddress() {
+        try {
+          return InetAddress.getByName(getHostAddress());
+        } catch (UnknownHostException e) {
+          return null;
+        }
+      }
+    };
+    final NameNode namenode = (NameNode)context.getAttribute("name.node");
+    namenode.queueExternalCall(call);
+    T result = null;
+    try {
+      result = call.get();
+    } catch (ExecutionException ee) {
+      Throwable t = ee.getCause();
+      if (t instanceof RuntimeException) {
+        throw (RuntimeException)t;
+      } else if (t instanceof IOException) {
+        throw (IOException)t;
+      } else {
+        throw new IOException(t);
+      }
+    }
+    return result;
+  }
+
   @VisibleForTesting
   static DatanodeInfo chooseDatanode(final NameNode namenode,
       final String path, final HttpOpParam.Op op, final long openOffset,
-      final long blocksize, final String excludeDatanodes) throws IOException {
+      final long blocksize, final String excludeDatanodes,
+      final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
       throw new IOException("Namesystem has not been intialized yet.");
@@ -190,7 +238,7 @@ public class NamenodeWebHdfsMethods {
     if (op == PutOpParam.Op.CREATE) {
       //choose a datanode near to client 
       final DatanodeDescriptor clientNode = bm.getDatanodeManager(
-          ).getDatanodeByHost(getRemoteAddress());
+          ).getDatanodeByHost(remoteAddr);
       if (clientNode != null) {
         final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
             path, clientNode, excludes, blocksize);
@@ -253,7 +301,8 @@ public class NamenodeWebHdfsMethods {
       return null;
     }
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    Text kind = request.getScheme().equals("http") ? WebHdfsConstants.WEBHDFS_TOKEN_KIND
+    Text kind = scheme.equals("http")
+        ? WebHdfsConstants.WEBHDFS_TOKEN_KIND
         : WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
     t.setKind(kind);
     return t;
@@ -267,7 +316,7 @@ public class NamenodeWebHdfsMethods {
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
     final DatanodeInfo dn;
     dn = chooseDatanode(namenode, path, op, openOffset, blocksize,
-        excludeDatanodes);
+        excludeDatanodes, remoteAddr);
     if (dn == null) {
       throw new IOException("Failed to find datanode, suggest to check cluster"
           + " health. excludeDatanodes=" + excludeDatanodes);
@@ -283,7 +332,7 @@ public class NamenodeWebHdfsMethods {
     } else {
       //generate a token
       final Token<? extends TokenIdentifier> t = generateDelegationToken(
-          namenode, ugi, request.getUserPrincipal().getName());
+          namenode, ugi, userPrincipal.getName());
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
     final String query = op.toQueryString() + delegationQuery
@@ -291,7 +340,6 @@ public class NamenodeWebHdfsMethods {
         + Param.toSortedString("&", parameters);
     final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
 
-    final String scheme = request.getScheme();
     int port = "http".equals(scheme) ? dn.getInfoPort() : dn
         .getInfoSecurePort();
     final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
@@ -446,10 +494,9 @@ public class NamenodeWebHdfsMethods {
         xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes,
         createFlagParam, noredirect);
 
-    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
+    return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        try {
           return put(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, destination, owner, group,
               permission, unmaskedPermission, overwrite, bufferSize,
@@ -458,9 +505,6 @@ public class NamenodeWebHdfsMethods {
               aclPermission, xattrName, xattrValue, xattrSetFlag,
               snapshotName, oldSnapshotName, excludeDatanodes,
               createFlagParam, noredirect);
-        } finally {
-          reset();
-        }
       }
     });
   }
@@ -703,16 +747,12 @@ public class NamenodeWebHdfsMethods {
     init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
         excludeDatanodes, newLength);
 
-    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
+    return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        try {
           return post(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, concatSrcs, bufferSize,
               excludeDatanodes, newLength, noredirect);
-        } finally {
-          reset();
-        }
       }
     });
   }
@@ -858,17 +898,13 @@ public class NamenodeWebHdfsMethods {
         renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction,
         tokenKind, tokenService, startAfter);
 
-    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
+    return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
               xattrNames, xattrEncoding, excludeDatanodes, fsAction, tokenKind,
               tokenService, noredirect, startAfter);
-        } finally {
-          reset();
-        }
       }
     });
   }
@@ -1138,15 +1174,11 @@ public class NamenodeWebHdfsMethods {
 
     init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);
 
-    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
+    return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
-        try {
           return delete(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, recursive, snapshotName);
-        } finally {
-          reset();
-        }
       }
     });
   }
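
The handlers above all switch from ugi.doAs(...) with a per-handler
try/finally reset() to a single doAs(ugi, action) helper whose body is not
part of this hunk. A minimal sketch of one plausible shape for such a
helper, assuming it only wraps UserGroupInformation.doAs and centralizes the
cleanup the handlers used to do themselves (the real method may also route
the call through the RPC call queue; the extra cleanup parameter here is
only to keep the sketch self-contained):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    final class DoAsSketch {
      // Illustrative only: run the action as the given user and always run
      // the cleanup, mirroring the try/finally blocks removed above.
      static <T> T doAs(UserGroupInformation ugi,
          PrivilegedExceptionAction<T> action, Runnable cleanup)
          throws IOException, InterruptedException {
        try {
          return ugi.doAs(action);
        } finally {
          cleanup.run();  // e.g. the per-request reset() dropped from handlers
        }
      }
    }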

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index db4035d..84b51f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4281,4 +4281,11 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.webhdfs.use.ipc.callq</name>
+    <value>true</value>
+    <description>Enables routing of webhdfs calls through rpc
+      call queue</description>
+  </property>
+
 </configuration>

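The new key can be overridden like any other HDFS setting, either in
hdfs-site.xml or programmatically. A small illustrative snippet (only the
key name and its default of true come from the entry above; the opt-out
shown here is just an example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class WebHdfsCallqExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Opt out of routing webhdfs calls through the RPC call queue.
        conf.setBoolean("dfs.webhdfs.use.ipc.callq", false);
        System.out.println(conf.getBoolean("dfs.webhdfs.use.ipc.callq", true));
      }
    }
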
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index 26efce5..d7a2c81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.LightWeightCache;
 import org.junit.After;
@@ -111,19 +112,33 @@ public class TestNamenodeRetryCache {
     }
   }
   
+  static class DummyCall extends Server.Call {
+    private UserGroupInformation ugi;
+
+    DummyCall(int callId, byte[] clientId) {
+      super(callId, 1, null, null, RpcKind.RPC_PROTOCOL_BUFFER, clientId);
+      try {
+        ugi = UserGroupInformation.getCurrentUser();
+      } catch (IOException ioe) {
+      }
+    }
+    @Override
+    public UserGroupInformation getRemoteUser() {
+      return ugi;
+    }
+  }
   /** Set the current Server RPC call */
   public static void newCall() {
-    Server.Call call = new Server.Call(++callId, 1, null, null,
-        RpcKind.RPC_PROTOCOL_BUFFER, CLIENT_ID);
+    Server.Call call = new DummyCall(++callId, CLIENT_ID);
     Server.getCurCall().set(call);
   }
   
   public static void resetCall() {
-    Server.Call call = new Server.Call(RpcConstants.INVALID_CALL_ID, 1, null,
-        null, RpcKind.RPC_PROTOCOL_BUFFER, RpcConstants.DUMMY_CLIENT_ID);
+    Server.Call call = new DummyCall(RpcConstants.INVALID_CALL_ID,
+        RpcConstants.DUMMY_CLIENT_ID);
     Server.getCurCall().set(call);
   }
-  
+
   private void concatSetup(String file1, String file2) throws Exception {
     DFSTestUtil.createFile(filesystem, new Path(file1), BlockSize, (short)1, 0L);
     DFSTestUtil.createFile(filesystem, new Path(file2), BlockSize, (short)1, 0L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 15e1c04..604bf79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.web.resources;
 import static org.mockito.Mockito.*;
 
 import java.io.IOException;
+import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.List;
 
@@ -62,6 +63,9 @@ public class TestWebHdfsDataLocality {
   private static final String RACK1 = "/rack1";
   private static final String RACK2 = "/rack2";
 
+  private static final String LOCALHOST =
+      InetAddress.getLoopbackAddress().getHostName();
+
   @Rule
   public final ExpectedException exception = ExpectedException.none();
 
@@ -96,7 +100,8 @@ public class TestWebHdfsDataLocality {
 
           //The chosen datanode must be the same as the client address
           final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
+              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null,
+              LOCALHOST);
           Assert.assertEquals(ipAddr, chosen.getIpAddr());
         }
       }
@@ -121,19 +126,22 @@ public class TestWebHdfsDataLocality {
 
       { //test GETFILECHECKSUM
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
+            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null,
+            LOCALHOST);
         Assert.assertEquals(expected, chosen);
       }
   
       { //test OPEN
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
+            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null,
+            LOCALHOST);
         Assert.assertEquals(expected, chosen);
       }
 
       { //test APPEND
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
+            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null,
+            LOCALHOST);
         Assert.assertEquals(expected, chosen);
       }
     } finally {
@@ -189,7 +197,7 @@ public class TestWebHdfsDataLocality {
         { // test GETFILECHECKSUM
           final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
               namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
-              sb.toString());
+              sb.toString(), LOCALHOST);
           for (int j = 0; j <= i; j++) {
             Assert.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
@@ -198,7 +206,8 @@ public class TestWebHdfsDataLocality {
 
         { // test OPEN
           final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
-              namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
+              namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString(),
+              LOCALHOST);
           for (int j = 0; j <= i; j++) {
             Assert.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
@@ -208,7 +217,7 @@ public class TestWebHdfsDataLocality {
         { // test APPEND
           final DatanodeInfo chosen = NamenodeWebHdfsMethods
               .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
-                  blocksize, sb.toString());
+                  blocksize, sb.toString(), LOCALHOST);
           for (int j = 0; j <= i; j++) {
             Assert.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
@@ -229,6 +238,6 @@ public class TestWebHdfsDataLocality {
     exception.expect(IOException.class);
     exception.expectMessage("Namesystem has not been intialized yet.");
     NamenodeWebHdfsMethods.chooseDatanode(nn, "/path", PutOpParam.Op.CREATE, 0,
-        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT, null);
+        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT, null, LOCALHOST);
   }
 }




[47/51] [abbrv] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

Posted by ae...@apache.org.
HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HDFS-7240
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Oct 13 13:52:49 2016 -0500

----------------------------------------------------------------------
 .../blockmanagement/DecommissionManager.java    | 29 +++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
      */
     private int numBlocksChecked = 0;
     /**
+     * The number of blocks checked after (re)holding lock.
+     */
+    private int numBlocksCheckedPerLock = 0;
+    /**
      * The number of nodes that have been checked on this tick. Used for 
      * statistics.
      */
@@ -418,6 +422,7 @@ public class DecommissionManager {
       }
       // Reset the checked count at beginning of each iteration
       numBlocksChecked = 0;
+      numBlocksCheckedPerLock = 0;
       numNodesChecked = 0;
       // Check decom progress
       namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
               iterkey).iterator();
       final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
 
-      while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+      while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+          .isRunning()) {
         numNodesChecked++;
         final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>
             entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
       int decommissionOnlyReplicas = 0;
       int lowRedundancyInOpenFiles = 0;
       while (it.hasNext()) {
+        if (insufficientList == null
+            && numBlocksCheckedPerLock >= numBlocksPerCheck) {
+          // During a full scan, insufficientList is NOT null and the iterator
+          // is the DN's own iterator, so the lock must not be yielded;
+          // otherwise a ConcurrentModificationException could occur.
+          // Once the full scan is done, the iterator is a copy, so the lock
+          // can be yielded safely.
+          // Yielding is required when the number of blocks exceeds the
+          // configured per-iteration limit.
+          namesystem.writeUnlock();
+          try {
+            LOG.debug("Yielded lock during decommission check");
+            Thread.sleep(0, 500);
+          } catch (InterruptedException ignored) {
+            return;
+          }
+          // reset
+          numBlocksCheckedPerLock = 0;
+          namesystem.writeLock();
+        }
         numBlocksChecked++;
+        numBlocksCheckedPerLock++;
         final BlockInfo block = it.next();
         // Remove the block from the list if it's no longer in the block map,
         // e.g. the containing file has been deleted
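
The unlock/sleep/relock sequence above is a general pattern for long scans
under the namesystem write lock. A stripped-down sketch of the same idea
against a plain ReentrantReadWriteLock (illustrative only; the real code
additionally refuses to yield while iterating the datanode's own list):

    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class LockYieldSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      // Process items under the write lock, yielding every `limit` items so
      // other writers are not starved for the whole scan.
      void processAll(List<String> items, int limit) throws InterruptedException {
        int checkedSinceLock = 0;
        lock.writeLock().lock();
        try {
          for (String item : items) {
            if (checkedSinceLock >= limit) {
              lock.writeLock().unlock();   // let other writers in
              try {
                Thread.sleep(0, 500);      // brief pause, as in the patch
              } finally {
                lock.writeLock().lock();   // always hold the lock again here
              }
              checkedSinceLock = 0;
            }
            checkedSinceLock++;
            // ... examine the item under the lock ...
          }
        } finally {
          lock.writeLock().unlock();
        }
      }
    }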




[07/51] [abbrv] hadoop git commit: HDFS-10980. Optimize check for existence of parent directory. Contributed by Daryn Sharp.

Posted by ae...@apache.org.
HDFS-10980. Optimize check for existence of parent directory. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e57fa81d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e57fa81d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e57fa81d

Branch: refs/heads/HDFS-7240
Commit: e57fa81d9559a93d77fd724f7792326c31a490be
Parents: f3f37e6
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Oct 7 17:20:15 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Oct 7 17:20:15 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  2 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |  2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../hdfs/server/namenode/FSDirectory.java       | 11 ++---
 .../hdfs/server/namenode/TestFSDirectory.java   | 48 ++++++++++++++++++++
 5 files changed, 56 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 2d1914f..4d8d7d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -66,7 +66,7 @@ class FSDirMkdirOp {
         }
 
         if (!createParent) {
-          fsd.verifyParentDir(iip, src);
+          fsd.verifyParentDir(iip);
         }
 
         // validate that we have enough inodes. This is, at best, a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 6938a84..71362f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -58,7 +58,7 @@ class FSDirSymlinkOp {
       iip = fsd.resolvePathForWrite(pc, link, false);
       link = iip.getPath();
       if (!createParent) {
-        fsd.verifyParentDir(iip, link);
+        fsd.verifyParentDir(iip);
       }
       if (!fsd.isValidToCreate(link, iip)) {
         throw new IOException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 40be83b..aab0f76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -323,7 +323,7 @@ class FSDirWriteFileOp {
       }
     } else {
       if (!createParent) {
-        dir.verifyParentDir(iip, src);
+        dir.verifyParentDir(iip);
       }
       if (!flag.contains(CreateFlag.CREATE)) {
         throw new FileNotFoundException("Can't overwrite non-existent " + src);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8456da6..a059ee5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1765,17 +1765,16 @@ public class FSDirectory implements Closeable {
   /**
    * Verify that parent directory of src exists.
    */
-  void verifyParentDir(INodesInPath iip, String src)
+  void verifyParentDir(INodesInPath iip)
       throws FileNotFoundException, ParentNotDirectoryException {
-    Path parent = new Path(src).getParent();
-    if (parent != null) {
+    if (iip.length() > 2) {
       final INode parentNode = iip.getINode(-2);
       if (parentNode == null) {
         throw new FileNotFoundException("Parent directory doesn't exist: "
-            + parent);
-      } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) {
+            + iip.getParentPath());
+      } else if (!parentNode.isDirectory()) {
         throw new ParentNotDirectoryException("Parent path is not a directory: "
-            + parent);
+            + iip.getParentPath());
       }
     }
   }
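
The rewritten check works off the already-resolved INodesInPath rather than
re-parsing the source string. A toy walk-through of the indexing, on a plain
array stand-in (illustrative only, not the INodesInPath API):

    final class ParentLookupSketch {
      public static void main(String[] args) {
        // "/dir1/dir2/file" resolves to [root, dir1, dir2, file].
        String[] resolved = {"root", "dir1", "dir2", "file"};
        if (resolved.length > 2) {                       // iip.length() > 2
          String parent = resolved[resolved.length - 2]; // iip.getINode(-2)
          System.out.println("parent to verify: " + parent);  // prints dir2
        }
        // "/file" resolves to [root, file]: length == 2, so only the root is
        // above the leaf and there is nothing extra to verify.
        // "/dir-nonexist/file" resolves to [root, null, null]: the parent slot
        // is null, which is the FileNotFoundException case in verifyParentDir.
      }
    }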

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57fa81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 2b43c0f..071bdf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.BufferedReader;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.EnumSet;
@@ -30,6 +31,7 @@ import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -386,4 +388,50 @@ public class TestFSDirectory {
                                                        XAttrSetFlag.REPLACE));
     verifyXAttrsPresent(newXAttrs, 4);
   }
+
+  @Test
+  public void testVerifyParentDir() throws Exception {
+    hdfs.mkdirs(new Path("/dir1/dir2"));
+    hdfs.createNewFile(new Path("/dir1/file"));
+    hdfs.createNewFile(new Path("/dir1/dir2/file"));
+
+    INodesInPath iip = fsdir.resolvePath(null, "/");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/file");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir-nonexist/file");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected FNF");
+    } catch (FileNotFoundException fnf) {
+      // expected.
+    }
+
+    iip = fsdir.resolvePath(null, "/dir1/dir2");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/dir2/file");
+    fsdir.verifyParentDir(iip);
+
+    iip = fsdir.resolvePath(null, "/dir1/dir-nonexist/file");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected FNF");
+    } catch (FileNotFoundException fnf) {
+      // expected.
+    }
+
+    iip = fsdir.resolvePath(null, "/dir1/file/fail");
+    try {
+      fsdir.verifyParentDir(iip);
+      fail("expected ParentNotDirectoryException");
+    } catch (ParentNotDirectoryException pnd) {
+      // expected.
+    }
+  }
 }




[48/51] [abbrv] hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

Posted by ae...@apache.org.
Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

This reverts commit fdce515091f0a61ffd6c9ae464a68447dedf1124.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c721aa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c721aa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c721aa0

Branch: refs/heads/HDFS-7240
Commit: 8c721aa00a47a976959e3861ddd742f09db432fc
Parents: 332a61f
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Oct 13 13:23:12 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Oct 13 13:23:28 2016 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestPendingInvalidateBlock.java    | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c721aa0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
     final Path foo = new Path("/foo");
     DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-    DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 10000);
-
     // restart NN
     cluster.restartNameNode(true);
     InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
         "invalidateBlocks", mockIb);
     dfs.delete(foo, true);
 
-    waitForNumPendingDeletionBlocks(REPLICATION);
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(REPLICATION, cluster.getNamesystem()
         .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
         dfs.getPendingDeletionBlocksCount());
     Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-    waitForNumPendingDeletionBlocks(0);
+    waitForBlocksToDelete();
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
     Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
     Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
     cluster.restartNameNode(true);
-    waitForNumPendingDeletionBlocks(0);
+    waitForBlocksToDelete();
     Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
     return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-      throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
       @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
         try {
           cluster.triggerBlockReports();
 
-          if (cluster.getNamesystem().getPendingDeletionBlocks()
-              == numBlocks) {
+          if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
             return true;
           }
         } catch (Exception e) {




[35/51] [abbrv] hadoop git commit: HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou

Posted by ae...@apache.org.
HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba7092b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba7092b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba7092b

Branch: refs/heads/HDFS-7240
Commit: 7ba7092bbcbbccfa24b672414d315656e600096c
Parents: b84c489
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Oct 11 16:47:39 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 17:23:54 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 50 ++++++++++++++++++++
 1 file changed, 50 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba7092b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 94ecb9e..b49f73d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
@@ -364,6 +365,55 @@ public class TestDFSAdmin {
   }
 
   @Test(timeout = 30000)
+  public void testPrintTopology() throws Exception {
+    redirectStream();
+
+    /* init conf */
+    final Configuration dfsConf = new HdfsConfiguration();
+    final File baseDir = new File(
+        PathUtils.getTestDir(getClass()),
+        GenericTestUtils.getMethodName());
+    dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+
+    final int numDn = 4;
+    final String[] racks = {
+        "/d1/r1", "/d1/r2",
+        "/d2/r1", "/d2/r2"};
+
+    /* init cluster using topology */
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+        .numDataNodes(numDn).racks(racks).build()) {
+
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      resetStream();
+      final int ret = ToolRunner.run(dfsAdmin, new String[] {"-printTopology"});
+
+      /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(0, ret);
+      assertEquals(
+          "There should be three lines per datanode: the 1st line is"
+              + " rack info, the 2nd is node info, and the 3rd is an empty"
+              + " line. The total should therefore be 3 * numDn.",
+          12, outs.size());
+      assertThat(outs.get(0),
+          is(allOf(containsString("Rack:"), containsString("/d1/r1"))));
+      assertThat(outs.get(3),
+          is(allOf(containsString("Rack:"), containsString("/d1/r2"))));
+      assertThat(outs.get(6),
+          is(allOf(containsString("Rack:"), containsString("/d2/r1"))));
+      assertThat(outs.get(9),
+          is(allOf(containsString("Rack:"), containsString("/d2/r2"))));
+    }
+  }
+
+  @Test(timeout = 30000)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
     ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
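
The test drives dfsadmin programmatically via ToolRunner; the same call can
be made outside the test harness. A hedged sketch (assumes fs.defaultFS
points at a running cluster; the output shape in the comment is schematic,
inferred from the assertions above, not verbatim tool output):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class PrintTopologySketch {
      public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new DFSAdmin(new HdfsConfiguration()),
            new String[] {"-printTopology"});
        // Per testPrintTopology, the output groups datanodes by rack, three
        // lines per datanode, roughly:
        //   Rack: /d1/r1
        //       <datanode line>
        //   <blank line>
        //   Rack: /d1/r2
        //   ...
        System.exit(ret);
      }
    }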




[09/51] [abbrv] hadoop git commit: HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. Contributed by Jing Zhao.

Posted by ae...@apache.org.
HDFS-10968. BlockManager#isInNewRack should consider decommissioning nodes. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d106213
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d106213
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d106213

Branch: refs/heads/HDFS-7240
Commit: 4d106213c0f4835b723c9a50bd8080a9017122d7
Parents: 6a38d11
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Oct 7 22:44:54 2016 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Oct 7 22:44:54 2016 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   6 +-
 ...constructStripedBlocksWithRackAwareness.java | 158 +++++++++++++++----
 2 files changed, 130 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8b74609..7949439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1781,8 +1781,12 @@ public class BlockManager implements BlockStatsMXBean {
 
   private boolean isInNewRack(DatanodeDescriptor[] srcs,
       DatanodeDescriptor target) {
+    LOG.debug("check if target {} increases racks, srcs={}", target,
+        Arrays.asList(srcs));
     for (DatanodeDescriptor src : srcs) {
-      if (src.getNetworkLocation().equals(target.getNetworkLocation())) {
+      if (!src.isDecommissionInProgress() &&
+          src.getNetworkLocation().equals(target.getNetworkLocation())) {
+        LOG.debug("the target {} is in the same rack with src {}", target, src);
         return false;
       }
     }
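
In other words, a source replica that is being decommissioned no longer
"pins" its rack, because that replica is on its way out; a target on the
same rack can therefore still add a new rack. A tiny standalone sketch of
the predicate (names are illustrative, not the BlockManager API):

    final class NewRackSketch {
      static boolean isInNewRack(String[] srcRacks, boolean[] srcDecommissioning,
          String targetRack) {
        for (int i = 0; i < srcRacks.length; i++) {
          // Ignore decommissioning sources: their racks will be vacated.
          if (!srcDecommissioning[i] && srcRacks[i].equals(targetRack)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // The only source on /r1 is decommissioning, the target is also on
        // /r1: the target still counts as bringing a new rack.
        System.out.println(isInNewRack(
            new String[] {"/r1"}, new boolean[] {true}, "/r1"));  // true
      }
    }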

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d106213/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 152e153..3bc13a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -35,12 +35,14 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -58,57 +60,44 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
   }
 
-  private static final String[] hosts = getHosts();
-  private static final String[] racks = getRacks();
+  private static final String[] hosts =
+      getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1);
+  private static final String[] racks =
+      getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1, NUM_DATA_BLOCKS);
 
-  private static String[] getHosts() {
-    String[] hosts = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
+  private static String[] getHosts(int numHosts) {
+    String[] hosts = new String[numHosts];
     for (int i = 0; i < hosts.length; i++) {
       hosts[i] = "host" + (i + 1);
     }
     return hosts;
   }
 
-  private static String[] getRacks() {
-    String[] racks = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
-    int numHostEachRack = (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 1) /
-        (NUM_DATA_BLOCKS - 1) + 1;
+  private static String[] getRacks(int numHosts, int numRacks) {
+    String[] racks = new String[numHosts];
+    int numHostEachRack = numHosts / numRacks;
+    int residue = numHosts % numRacks;
     int j = 0;
-    // we have NUM_DATA_BLOCKS racks
-    for (int i = 1; i <= NUM_DATA_BLOCKS; i++) {
-      if (j == racks.length - 1) {
-        assert i == NUM_DATA_BLOCKS;
+    for (int i = 1; i <= numRacks; i++) {
+      int limit = i <= residue ? numHostEachRack + 1 : numHostEachRack;
+      for (int k = 0; k < limit; k++) {
         racks[j++] = "/r" + i;
-      } else {
-        for (int k = 0; k < numHostEachRack && j < racks.length - 1; k++) {
-          racks[j++] = "/r" + i;
-        }
       }
     }
+    assert j == numHosts;
     return racks;
   }
 
   private MiniDFSCluster cluster;
+  private static final HdfsConfiguration conf = new HdfsConfiguration();
   private DistributedFileSystem fs;
-  private FSNamesystem fsn;
-  private BlockManager bm;
 
-  @Before
-  public void setup() throws Exception {
-    final HdfsConfiguration conf = new HdfsConfiguration();
+  @BeforeClass
+  public static void setup() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
         false);
-
-    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
-        .numDataNodes(hosts.length).build();
-    cluster.waitActive();
-
-    fsn = cluster.getNamesystem();
-    bm = fsn.getBlockManager();
-
-    fs = cluster.getFileSystem();
-    fs.setErasureCodingPolicy(new Path("/"), null);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
   }
 
   @After
@@ -132,6 +121,15 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     return dnProp;
   }
 
+  private DataNode getDataNode(String host) {
+    for (DataNode dn : cluster.getDataNodes()) {
+      if (dn.getDatanodeId().getHostName().equals(host)) {
+        return dn;
+      }
+    }
+    return null;
+  }
+
   /**
    * When there are all the internal blocks available but they are not placed on
    * enough racks, NameNode should avoid normal decoding reconstruction but copy
@@ -143,9 +141,19 @@ public class TestReconstructStripedBlocksWithRackAwareness {
    */
   @Test
   public void testReconstructForNotEnoughRacks() throws Exception {
+    LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts),
+        Arrays.asList(racks));
+
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+    FSNamesystem fsn = cluster.getNamesystem();
+    BlockManager bm = fsn.getBlockManager();
+
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
         hosts[hosts.length - 1]);
-
     final Path file = new Path("/foo");
     // the file's block is in 9 dn but 5 racks
     DFSTestUtil.createFile(fs, file,
@@ -206,6 +214,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
   @Test
   public void testChooseExcessReplicasToDelete() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
         hosts[hosts.length - 1]);
 
@@ -242,4 +256,82 @@ public class TestReconstructStripedBlocksWithRackAwareness {
       Assert.assertFalse(dn.getHostName().equals("host1"));
     }
   }
+
+  /**
+   * In case we have 10 internal blocks on 5 racks, where 9 of blocks are live
+   * and 1 decommissioning, make sure the reconstruction happens correctly.
+   */
+  @Test
+  public void testReconstructionWithDecommission() throws Exception {
+    final String[] racks = getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2,
+        NUM_DATA_BLOCKS);
+    final String[] hosts = getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2);
+    // we now have 11 hosts on 6 racks with distribution: 2-2-2-2-2-1
+    cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
+        .numDataNodes(hosts.length).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.setErasureCodingPolicy(new Path("/"), null);
+
+    final BlockManager bm = cluster.getNamesystem().getBlockManager();
+    final DatanodeManager dm = bm.getDatanodeManager();
+
+    // stop h9 and h10 and create a file with 6+3 internal blocks
+    MiniDFSCluster.DataNodeProperties h9 = stopDataNode(hosts[hosts.length - 3]);
+    MiniDFSCluster.DataNodeProperties h10 = stopDataNode(hosts[hosts.length - 2]);
+    final Path file = new Path("/foo");
+    DFSTestUtil.createFile(fs, file,
+        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+    final BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
+        .getINode(file.toString()).asFile().getLastBlock();
+
+    // bring h9 back
+    cluster.restartDataNode(h9);
+    cluster.waitActive();
+
+    // stop h11 so that the reconstruction happens
+    MiniDFSCluster.DataNodeProperties h11 = stopDataNode(hosts[hosts.length - 1]);
+    boolean recovered = bm.countNodes(blockInfo).liveReplicas() >=
+        NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    for (int i = 0; i < 10 && !recovered; i++) {
+      Thread.sleep(1000);
+      recovered = bm.countNodes(blockInfo).liveReplicas() >=
+          NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+    }
+    Assert.assertTrue(recovered);
+
+    // mark h9 as decommissioning
+    DataNode datanode9 = getDataNode(hosts[hosts.length - 3]);
+    Assert.assertNotNull(datanode9);
+    final DatanodeDescriptor dn9 = dm.getDatanode(datanode9.getDatanodeId());
+    dn9.startDecommission();
+
+    // restart h10 and h11
+    cluster.restartDataNode(h10);
+    cluster.restartDataNode(h11);
+    cluster.waitActive();
+    DataNodeTestUtils.triggerBlockReport(getDataNode(hosts[hosts.length - 1]));
+
+    // start decommissioning h9
+    boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo);
+    Assert.assertFalse(satisfied);
+    final DecommissionManager decomManager =
+        (DecommissionManager) Whitebox.getInternalState(dm, "decomManager");
+    cluster.getNamesystem().writeLock();
+    try {
+      dn9.stopDecommission();
+      decomManager.startDecommission(dn9);
+    } finally {
+      cluster.getNamesystem().writeUnlock();
+    }
+
+    // make sure the decommission finishes and the block in on 6 racks
+    boolean decommissioned = dn9.isDecommissioned();
+    for (int i = 0; i < 10 && !decommissioned; i++) {
+      Thread.sleep(1000);
+      decommissioned = dn9.isDecommissioned();
+    }
+    Assert.assertTrue(decommissioned);
+    Assert.assertTrue(bm.isPlacementPolicySatisfied(blockInfo));
+  }
 }
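
The reworked getRacks spreads numHosts over numRacks as evenly as possible,
giving the first numHosts % numRacks racks one extra host. For the new
decommission test this is 11 hosts over 6 racks (the 6+3 striping schema
plus two spare nodes), which yields the 2-2-2-2-2-1 layout noted in the
comment. A minimal re-run of that arithmetic:

    final class RackLayoutSketch {
      public static void main(String[] args) {
        int numHosts = 11, numRacks = 6;
        int perRack = numHosts / numRacks;   // 1
        int residue = numHosts % numRacks;   // 5
        for (int rack = 1; rack <= numRacks; rack++) {
          int hostsHere = rack <= residue ? perRack + 1 : perRack;
          System.out.println("/r" + rack + " -> " + hostsHere + " host(s)");
        }
        // Prints 2, 2, 2, 2, 2, 1 -- the 2-2-2-2-2-1 distribution.
      }
    }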




[46/51] [abbrv] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

Posted by ae...@apache.org.
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdce5150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdce5150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdce5150

Branch: refs/heads/HDFS-7240
Commit: fdce515091f0a61ffd6c9ae464a68447dedf1124
Parents: 008122b
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Oct 13 11:41:37 2016 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestPendingInvalidateBlock.java    | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdce5150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
     final Path foo = new Path("/foo");
     DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+    DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 10000);
+
     // restart NN
     cluster.restartNameNode(true);
     InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
         "invalidateBlocks", mockIb);
     dfs.delete(foo, true);
 
+    waitForNumPendingDeletionBlocks(REPLICATION);
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(REPLICATION, cluster.getNamesystem()
         .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
         dfs.getPendingDeletionBlocksCount());
     Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-    waitForBlocksToDelete();
+    waitForNumPendingDeletionBlocks(0);
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
     Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
     Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
     cluster.restartNameNode(true);
-    waitForBlocksToDelete();
+    waitForNumPendingDeletionBlocks(0);
     Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
     return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+      throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
       @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
         try {
           cluster.triggerBlockReports();
 
-          if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+          if (cluster.getNamesystem().getPendingDeletionBlocks()
+              == numBlocks) {
             return true;
           }
         } catch (Exception e) {




[51/51] [abbrv] hadoop git commit: Fixing merge conflicts due to HDFS-10637

Posted by ae...@apache.org.
Fixing merge conflicts due to HDFS-10637


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/841742cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/841742cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/841742cd

Branch: refs/heads/HDFS-7240
Commit: 841742cdd5717febfd58670e6f2d0ccc303a7eee
Parents: 7d70e57
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Oct 13 15:29:52 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Oct 13 15:29:52 2016 -0700

----------------------------------------------------------------------
 .../ozone/container/common/impl/ContainerLocationManagerImpl.java  | 2 +-
 .../apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/841742cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
index deb35f2..6dbd593 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
@@ -71,7 +71,7 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
         references = this.dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBasePath()));
+          pathList.add(Paths.get(vol.getBaseURI().getPath()));
         }
         references.close();
         volumePaths = pathList.toArray(new Path[pathList.size()]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/841742cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index fc3e0cf..b8fd696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -160,7 +160,7 @@ public class OzoneContainer {
         references = dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBasePath()));
+          pathList.add(Paths.get(vol.getBaseURI().getPath()));
         }
         references.close();
       }




[45/51] [abbrv] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

Posted by ae...@apache.org.
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/008122b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/008122b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/008122b3

Branch: refs/heads/HDFS-7240
Commit: 008122b3c927767ac96dc876124bc591e10c9df4
Parents: 9097e2e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Oct 13 11:37:03 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/008122b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
           // update the used variable
           spaceUsed.refresh();
         } catch (InterruptedException e) {
-          LOG.warn("Thread Interrupted waiting to refresh disk information", e);
+          LOG.warn("Thread Interrupted waiting to refresh disk information: "
+              + e.getMessage());
           Thread.currentThread().interrupt();
         }
       }
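
The change keeps the interrupt visible to the caller while logging only the
message, which is the usual idiom for an expected interruption of a
background refresh loop. A condensed sketch of the pattern:

    final class InterruptIdiomSketch implements Runnable {
      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          try {
            Thread.sleep(1000);  // stand-in for the refresh interval
            // ... refresh the cached value here ...
          } catch (InterruptedException e) {
            // Log the message only, no stack trace: expected at shutdown.
            System.err.println("refresh interrupted: " + e.getMessage());
            Thread.currentThread().interrupt();  // restore the interrupt flag
          }
        }
      }
    }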




[34/51] [abbrv] hadoop git commit: HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.

Posted by ae...@apache.org.
HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b84c4891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b84c4891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b84c4891

Branch: refs/heads/HDFS-7240
Commit: b84c4891f9eca8d56593e48e9df88be42e24220d
Parents: 3c9a010
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Oct 11 17:05:00 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Oct 11 17:05:00 2016 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/CommandsManual.md            | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84c4891/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 4d7d504..2ece71a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -202,7 +202,9 @@ Manage keys via the KeyProvider. For details on KeyProviders, see the [Transpare
 
 Providers frequently require that a password or other secret is supplied. If the provider requires a password and is unable to find one, it will use a default password and emit a warning message that the default password is being used. If the `-strict` flag is supplied, the warning message becomes an error message and the command returns immediately with an error status.
 
-NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider) does not support uppercase key names.
+NOTE: Some KeyProviders (e.g. org.apache.hadoop.crypto.key.JavaKeyStoreProvider) do not support uppercase key names.
+
+NOTE: Some KeyProviders do not directly execute a key deletion (e.g. they perform a soft delete instead, or delay the actual deletion, to prevent mistakes). In these cases, one may encounter errors when creating or deleting a key with the same name after deleting it. Please check the underlying KeyProvider for details.
 
 ### `trace`
 




[23/51] [abbrv] hadoop git commit: YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. Contributed by Jason Lowe.

Posted by ae...@apache.org.
YARN-5057. Resourcemanager.security.TestDelegationTokenRenewer fails in trunk. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0773ffd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0773ffd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0773ffd0

Branch: refs/heads/HDFS-7240
Commit: 0773ffd0f8383384f8cf8599476565f78aae70c9
Parents: 669d6f1
Author: Naganarasimha <na...@apache.org>
Authored: Mon Oct 10 18:04:47 2016 -0400
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Oct 10 18:04:47 2016 -0400

----------------------------------------------------------------------
 .../security/TestDelegationTokenRenewer.java    | 24 ++++++++++++++++----
 1 file changed, 19 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0773ffd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 5dfee89..205188b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1148,17 +1148,21 @@ public class TestDelegationTokenRenewer {
         credentials, null, true, false, false, null, 0, null, false, null);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
     rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+    DelegationTokenRenewer renewer =
+        rm.getRMContext().getDelegationTokenRenewer();
+    DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
+    Assert.assertNotNull(dttr);
 
     // submit app2 with the same token, set cancelTokenWhenComplete to true;
     RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
         credentials, null, true, false, false, null, 0, null, true, null);
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
     rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
-    MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+    finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
     Assert.assertTrue(rm.getRMContext().getDelegationTokenRenewer()
       .getAllTokens().containsKey(token1));
 
-    MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+    finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
     // app2 completes, app1 is still running, check the token is not cancelled
     Assert.assertFalse(Renewer.cancelled);
   }
@@ -1224,7 +1228,7 @@ public class TestDelegationTokenRenewer {
     Assert.assertTrue(dttr.referringAppIds.contains(app2.getApplicationId()));
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+    finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
     // app2 completes, app1 is still running, check the token is not cancelled
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
     Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
@@ -1242,14 +1246,14 @@ public class TestDelegationTokenRenewer {
     Assert.assertFalse(dttr.isTimerCancelled());
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+    finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
     Assert.assertFalse(dttr.referringAppIds.contains(app1.getApplicationId()));
     Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
     Assert.assertFalse(dttr.isTimerCancelled());
     Assert.assertFalse(Renewer.cancelled);
 
-    MockRM.finishAMAndVerifyAppState(app3, rm, nm1, am3);
+    finishAMAndWaitForComplete(app3, rm, nm1, am3, dttr);
     Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
     Assert.assertTrue(dttr.referringAppIds.isEmpty());
     Assert.assertTrue(dttr.isTimerCancelled());
@@ -1259,4 +1263,14 @@ public class TestDelegationTokenRenewer {
     Assert.assertFalse(renewer.getDelegationTokens().contains(token1));
   }
 
+  private void finishAMAndWaitForComplete(final RMApp app, MockRM rm,
+      MockNM nm, MockAM am, final DelegationTokenToRenew dttr)
+          throws Exception {
+    MockRM.finishAMAndVerifyAppState(app, rm, nm, am);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      public Boolean get() {
+        return !dttr.referringAppIds.contains(app.getApplicationId());
+      }
+    }, 10, 10000);
+  }
 }
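
The new helper polls because the renewer removes a finished application from the
token's referringAppIds asynchronously, after finishAMAndVerifyAppState has
returned. A small standalone sketch of the same GenericTestUtils.waitFor idiom
(the queue and the background thread are hypothetical stand-ins for that
asynchronous cleanup):

import java.util.concurrent.ConcurrentLinkedQueue;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical asynchronous state: a background thread drains this queue,
    // much like the renewer removing a finished app from referringAppIds.
    final ConcurrentLinkedQueue<String> pending = new ConcurrentLinkedQueue<>();
    pending.add("app_0001");
    new Thread(new Runnable() {
      @Override
      public void run() {
        pending.poll();
      }
    }).start();

    // Check every 10 ms, give up after 10 s -- the same bounds as the fix above.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return pending.isEmpty();
      }
    }, 10, 10000);
  }
}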



[08/51] [abbrv] hadoop git commit: HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory.

Posted by ae...@apache.org.
HDFS-10797. Disk usage summary of snapshots causes renamed blocks to get counted twice. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a38d118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a38d118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a38d118

Branch: refs/heads/HDFS-7240
Commit: 6a38d118d86b7907009bcec34f1b788d076f1d1c
Parents: e57fa81
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Oct 7 17:30:30 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Oct 7 17:37:15 2016 -0700

----------------------------------------------------------------------
 .../ContentSummaryComputationContext.java       |  94 ++++++++-
 .../hadoop/hdfs/server/namenode/INode.java      |   1 +
 .../hdfs/server/namenode/INodeDirectory.java    |  11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../hdfs/server/namenode/INodeReference.java    |   2 +
 .../hdfs/server/namenode/INodeSymlink.java      |   1 +
 .../snapshot/DirectorySnapshottableFeature.java |   9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   1 +
 .../snapshot/TestRenameWithSnapshots.java       | 199 +++++++++++++++++++
 10 files changed, 307 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 6df9e75..4208b53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,6 +21,10 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import java.util.HashSet;
+import java.util.Set;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -35,6 +39,8 @@ public class ContentSummaryComputationContext {
   private long yieldCount = 0;
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
+  private Set<INode> includedNodes = new HashSet<>();
+  private Set<INode> deletedSnapshottedNodes = new HashSet<>();
 
   /**
    * Constructor
@@ -51,8 +57,8 @@ public class ContentSummaryComputationContext {
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
-    this.counts = new ContentCounts.Builder().build();
-    this.snapshotCounts = new ContentCounts.Builder().build();
+    setCounts(new ContentCounts.Builder().build());
+    setSnapshotCounts(new ContentCounts.Builder().build());
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
@@ -82,6 +88,7 @@ public class ContentSummaryComputationContext {
     }
 
     // Have we reached the limit?
+    ContentCounts counts = getCounts();
     long currentCount = counts.getFileCount() +
         counts.getSymlinkCount() +
         counts.getDirectoryCount() +
@@ -123,14 +130,22 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public ContentCounts getCounts() {
+  public synchronized ContentCounts getCounts() {
     return counts;
   }
 
+  private synchronized void setCounts(ContentCounts counts) {
+    this.counts = counts;
+  }
+
   public ContentCounts getSnapshotCounts() {
     return snapshotCounts;
   }
 
+  private void setSnapshotCounts(ContentCounts snapshotCounts) {
+    this.snapshotCounts = snapshotCounts;
+  }
+
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
     Preconditions.checkState((bsps != null || fsn != null),
         "BlockStoragePolicySuite must be either initialized or available via" +
@@ -138,4 +153,77 @@ public class ContentSummaryComputationContext {
     return (bsps != null) ? bsps:
         fsn.getBlockManager().getStoragePolicySuite();
   }
+
+  /**
+   * If the node is an INodeReference, resolves it to the actual inode.
+   * Snapshot diffs represent renamed / moved files as different
+   * INodeReferences, but the underlying INode they refer to is the same.
+   *
+   * @param node
+   * @return The referred INode if there is one, else returns the input
+   * unmodified.
+   */
+  private INode resolveINodeReference(INode node) {
+    if (node.isReference() && node instanceof INodeReference) {
+      return ((INodeReference)node).getReferredINode();
+    }
+    return node;
+  }
+
+  /**
+   * Reports that a node is about to be included in this summary. Can be used
+   * either to simply report that a node has been included, or to check whether
+   * a node has already been included.
+   *
+   * @param node
+   * @return true if node has already been included
+   */
+  public boolean nodeIncluded(INode node) {
+    INode resolvedNode = resolveINodeReference(node);
+    synchronized (includedNodes) {
+      if (!includedNodes.contains(resolvedNode)) {
+        includedNodes.add(resolvedNode);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Schedules a node that is listed as DELETED in a snapshot's diff to be
+   * included in the summary at the end of computation. See
+   * {@link #tallyDeletedSnapshottedINodes()} for more context.
+   *
+   * @param node
+   */
+  public void reportDeletedSnapshottedNode(INode node) {
+    deletedSnapshottedNodes.add(node);
+  }
+
+  /**
+   * Finalizes the computation by including all nodes that were reported as
+   * deleted by a snapshot but have not already been included due to other
+   * references.
+   * <p>
+   * Nodes that get renamed are listed in the snapshot's diff as both DELETED
+   * under the old name and CREATED under the new name. The computation
+   * relies on nodes reporting themselves as included (via
+   * {@link #nodeIncluded(INode)}) as the only reliable way to determine which
+   * nodes were renamed within the tree being summarized and which were
+   * removed (either by deletion or being renamed outside of the tree).
+   */
+  public synchronized void tallyDeletedSnapshottedINodes() {
+    /* Temporarily create a new counts object so these results can then be
+    added to both counts and snapshotCounts */
+    ContentCounts originalCounts = getCounts();
+    setCounts(new ContentCounts.Builder().build());
+    for (INode node : deletedSnapshottedNodes) {
+      if (!nodeIncluded(node)) {
+        node.computeContentSummary(Snapshot.CURRENT_STATE_ID, this);
+      }
+    }
+    originalCounts.addContents(getCounts());
+    snapshotCounts.addContents(getCounts());
+    setCounts(originalCounts);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c6258a1..e1db990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -429,6 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
+    summary.tallyDeletedSnapshottedINodes();
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
     final QuotaCounts q = getQuotaCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 24c8815..b6e2713 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -628,17 +628,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
-      final ContentCounts counts = new ContentCounts.Builder().build();
-      // if the getContentSummary call is against a non-snapshot path, the
-      // computation should include all the deleted files/directories
-      sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
-          counts);
-      summary.getCounts().addContents(counts);
-      // Also add ContentSummary to snapshotCounts (So we can extract it
-      // later from the ContentSummary of all).
-      summary.getSnapshotCounts().addContents(counts);
+      sf.computeContentSummary4Snapshot(summary);
     }
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
     if (q != null && snapshotId == Snapshot.CURRENT_STATE_ID) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 12ead7f..37f97db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -770,6 +770,7 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       int snapshotId, final ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     final ContentCounts counts = summary.getCounts();
     counts.addContent(Content.FILE, 1);
     final long fileLen = computeFileSize(snapshotId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b85237..56aaf8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -315,6 +315,7 @@ public abstract class INodeReference extends INode {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     return referred.computeContentSummary(snapshotId, summary);
   }
 
@@ -504,6 +505,7 @@ public abstract class INodeReference extends INode {
     @Override
     public final ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
+      summary.nodeIncluded(this);
       final int s = snapshotId < lastSnapshotId ? snapshotId : lastSnapshotId;
       // only count storagespace for WithName
       final QuotaCounts q = computeQuotaUsage(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index c76bea0..1223f4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -96,6 +96,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       final ContentSummaryComputationContext summary) {
+    summary.nodeIncluded(this);
     summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 39db979..fa7bace 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
+import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;
@@ -220,11 +220,12 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
   }
 
   @Override
-  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
+  public void computeContentSummary4Snapshot(ContentSummaryComputationContext
+                                                   context) {
+    ContentCounts counts = context.getCounts();
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
-    super.computeContentSummary4Snapshot(bsps, counts);
+    super.computeContentSummary4Snapshot(context);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3b..9addbfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
-import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -629,18 +628,13 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
     return counts;
   }
 
-  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) {
-    // Create a new blank summary context for blocking processing of subtree.
-    ContentSummaryComputationContext summary = 
-        new ContentSummaryComputationContext(bsps);
+  public void computeContentSummary4Snapshot(
+      ContentSummaryComputationContext context) {
     for(DirectoryDiff d : diffs) {
-      for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeContentSummary(Snapshot.CURRENT_STATE_ID, summary);
+      for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
+        context.reportDeletedSnapshottedNode(deletedNode);
       }
     }
-    // Add the counts from deleted trees.
-    counts.addContents(summary.getCounts());
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index e98e766..832a339 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -177,6 +177,7 @@ public class Snapshot implements Comparable<byte[]> {
     @Override
     public ContentSummaryComputationContext computeContentSummary(
         int snapshotId, ContentSummaryComputationContext summary) {
+      summary.nodeIncluded(this);
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a38d118/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 91eec78..d1b3aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -36,8 +36,10 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -2411,4 +2413,201 @@ public class TestRenameWithSnapshots {
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
     assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
   }
+
+  private void checkSpaceConsumed(String message, Path directory,
+                                  long expectedSpace) throws Exception {
+    ContentSummary summary = hdfs.getContentSummary(directory);
+    assertEquals(message, expectedSpace, summary.getSpaceConsumed());
+  }
+
+  /**
+   * Runs through various combinations of renames, deletes, appends and other
+   * operations in a snapshotted directory and ensures disk usage summaries
+   * (e.g. du -s) are computed correctly.
+   *
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testDu() throws Exception {
+    File tempFile = File.createTempFile("testDu-", ".tmp");
+    tempFile.deleteOnExit();
+
+    final FileSystem localfs = FileSystem.getLocal(conf);
+    final Path localOriginal = new Path(tempFile.getPath());
+    final Path dfsRoot = new Path("/testDu");
+    final Path dfsOriginal = new Path(dfsRoot, "original");
+    final Path dfsRenamed1 = new Path(dfsRoot, "renamed1");
+    final Path dfsRenamed2 = new Path(dfsRoot, "renamed2");
+    final Path dfsAppended = new Path(dfsRoot, "appended");
+
+    /* We will test with a single block worth of data. If we don't at least use
+    a multiple of BLOCKSIZE, append operations will modify snapshotted blocks
+    and other factors will come into play here that we'll have to account for */
+    final long spaceIncrement = BLOCKSIZE * REPL;
+    final byte[] appendData = new byte[(int) BLOCKSIZE];
+    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
+
+    FSDataOutputStream out = null;
+    long expectedSpace = 0;
+
+    hdfs.mkdirs(dfsRoot);
+    checkSpaceConsumed("Du is wrong immediately",
+        dfsRoot, 0L);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after creating / copying file",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s0");
+    checkSpaceConsumed("Du is wrong after snapshotting",
+        dfsRoot, expectedSpace);
+
+    hdfs.rename(dfsOriginal, dfsRenamed1);
+    checkSpaceConsumed("Du is wrong after 1 rename",
+        dfsRoot, expectedSpace);
+
+    hdfs.rename(dfsRenamed1, dfsRenamed2);
+    checkSpaceConsumed("Du is wrong after 2 renames",
+        dfsRoot, expectedSpace);
+
+    hdfs.delete(dfsRenamed2, false);
+    checkSpaceConsumed("Du is wrong after deletion",
+        dfsRoot, expectedSpace);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after replacing a renamed file",
+        dfsRoot, expectedSpace);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsAppended);
+    expectedSpace += spaceIncrement;
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s1");
+
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after 1 snapshot + append",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s2");
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    checkSpaceConsumed("Du is wrong after 2 snapshot + appends",
+        dfsRoot, expectedSpace);
+
+    SnapshotTestHelper.createSnapshot(hdfs, dfsRoot, "s3");
+    out = hdfs.append(dfsAppended);
+    out.write(appendData);
+    out.close();
+    expectedSpace += spaceIncrement;
+    hdfs.rename(dfsAppended, dfsRenamed1);
+    checkSpaceConsumed("Du is wrong after snapshot, append, & rename",
+        dfsRoot, expectedSpace);
+    hdfs.delete(dfsRenamed1, false);
+    // everything but the last append is snapshotted
+    expectedSpace -= spaceIncrement;
+    checkSpaceConsumed("Du is wrong after snapshot, append, delete & rename",
+        dfsRoot, expectedSpace);
+
+    hdfs.delete(dfsOriginal, false);
+    hdfs.deleteSnapshot(dfsRoot, "s0");
+    hdfs.deleteSnapshot(dfsRoot, "s1");
+    hdfs.deleteSnapshot(dfsRoot, "s2");
+    hdfs.deleteSnapshot(dfsRoot, "s3");
+    expectedSpace = 0;
+    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
+        dfsRoot, expectedSpace);
+  }
+
+  /**
+   * Runs through various combinations of renames, deletes, appends and other
+   * operations between two snapshotted directories and ensures disk usage
+   * summaries (e.g. du -s) are computed correctly.
+   *
+   * This test currently assumes some incorrect behavior when files have been
+   * moved between subdirectories of the one being queried. In the cases
+   * below, only 1 block worth of data should ever actually be used. However,
+   * if there are 2-3 subdirectories that contain, or have contained when
+   * snapshotted, the same file, that file will be counted 2-3 times,
+   * respectively, since each directory is computed independently and
+   * recursively.
+   *
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testDuMultipleDirs() throws Exception {
+    File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
+    tempFile.deleteOnExit();
+
+    final FileSystem localfs = FileSystem.getLocal(conf);
+    final Path localOriginal = new Path(tempFile.getPath());
+    final Path dfsRoot = new Path("/testDuMultipleDirs");
+    final Path snapshottable1 = new Path(dfsRoot, "snapshottable1");
+    final Path snapshottable2 = new Path(dfsRoot, "snapshottable2");
+    final Path nonsnapshottable = new Path(dfsRoot, "nonsnapshottable");
+    final Path subdirectory = new Path(snapshottable1, "subdirectory");
+    final Path dfsOriginal = new Path(snapshottable1, "file");
+    final Path renamedNonsnapshottable = new Path(nonsnapshottable, "file");
+    final Path renamedSnapshottable = new Path(snapshottable2, "file");
+    final Path renamedSubdirectory = new Path(subdirectory, "file");
+
+    /* We will test with a single block worth of data. If we don't at least use
+    a multiple of BLOCKSIZE, append operations will modify snapshotted blocks
+    and other factors will come into play here that we'll have to account for */
+    final long spaceConsumed = BLOCKSIZE * REPL;
+    DFSTestUtil.createFile(localfs, localOriginal, BLOCKSIZE, REPL, SEED);
+
+    hdfs.mkdirs(snapshottable1);
+    hdfs.mkdirs(snapshottable2);
+    hdfs.mkdirs(nonsnapshottable);
+    hdfs.mkdirs(subdirectory);
+    checkSpaceConsumed("Du is wrong immediately",
+        dfsRoot, 0L);
+
+    hdfs.copyFromLocalFile(localOriginal, dfsOriginal);
+    checkSpaceConsumed("Du is wrong after creating / copying file",
+        snapshottable1, spaceConsumed);
+
+    SnapshotTestHelper.createSnapshot(hdfs, snapshottable1, "s1");
+    checkSpaceConsumed("Du is wrong in original dir after 1st snapshot",
+        snapshottable1, spaceConsumed);
+
+    hdfs.rename(dfsOriginal, renamedNonsnapshottable);
+    checkSpaceConsumed("Du is wrong in original dir after 1st rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 1st rename",
+        nonsnapshottable, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 1st rename",
+        dfsRoot, spaceConsumed);
+
+    hdfs.rename(renamedNonsnapshottable, renamedSnapshottable);
+    checkSpaceConsumed("Du is wrong in original dir after 2nd rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in non-snapshottable dir after 2nd rename",
+        nonsnapshottable, 0);
+    checkSpaceConsumed("Du is wrong in snapshottable dir after 2nd rename",
+        snapshottable2, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 2nd rename",
+        dfsRoot, spaceConsumed);
+
+    SnapshotTestHelper.createSnapshot(hdfs, snapshottable2, "s2");
+    hdfs.rename(renamedSnapshottable, renamedSubdirectory);
+    checkSpaceConsumed("Du is wrong in original dir after 3rd rename",
+        snapshottable1, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in snapshottable dir after 3rd rename",
+        snapshottable2, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in original subdirectory after 3rd rename",
+        subdirectory, spaceConsumed);
+    checkSpaceConsumed("Du is wrong in root dir after 3rd rename",
+        dfsRoot, spaceConsumed);
+
+    hdfs.delete(renamedSubdirectory, false);
+    hdfs.deleteSnapshot(snapshottable1, "s1");
+    hdfs.deleteSnapshot(snapshottable2, "s2");
+    checkSpaceConsumed("Du is wrong after deleting all files and snapshots",
+        dfsRoot, 0);
+  }
 }
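
The heart of this change is the bookkeeping added to
ContentSummaryComputationContext: every inode reports itself before being
counted, INodeReferences are resolved to the inode they point at, and nodes
listed as DELETED in a snapshot diff are only tallied at the end if nothing
else already counted them. A small self-contained sketch of that dedup idea
(Node, PlainNode and RefNode are made-up stand-ins for INode and
INodeReference, not HDFS classes):

import java.util.HashSet;
import java.util.Set;

// Made-up stand-ins for INode / INodeReference.
interface Node {
  Node resolve();
  long length();
}

class PlainNode implements Node {
  private final long len;
  PlainNode(long len) { this.len = len; }
  public Node resolve() { return this; }
  public long length() { return len; }
}

class RefNode implements Node {
  private final Node referred;
  RefNode(Node referred) { this.referred = referred; }
  public Node resolve() { return referred; }   // like getReferredINode()
  public long length() { return referred.length(); }
}

public class DedupSummary {
  private final Set<Node> included = new HashSet<>();
  private long totalLength = 0;

  /** Returns true if the (resolved) node was already counted. */
  boolean nodeIncluded(Node node) {
    return !included.add(node.resolve());
  }

  void count(Node node) {
    if (!nodeIncluded(node)) {
      totalLength += node.length();
    }
  }

  public static void main(String[] args) {
    PlainNode file = new PlainNode(1024);
    DedupSummary summary = new DedupSummary();
    summary.count(new RefNode(file));  // seen under its new name
    summary.count(new RefNode(file));  // listed as DELETED in a snapshot diff
    System.out.println(summary.totalLength);  // 1024, not 2048
  }
}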



[39/51] [abbrv] hadoop git commit: HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and #getInstance signatures.

Posted by ae...@apache.org.
HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and #getInstance signatures.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d739a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d739a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d739a3

Branch: refs/heads/HDFS-7240
Commit: 12d739a34ba868b3f7f5adf7f37a60d4aca9061b
Parents: 85cd06f
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Oct 12 15:19:52 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Oct 12 15:19:52 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/TrashPolicy.java        | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d739a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index 157b9ab..2fe3fd1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -53,9 +53,8 @@ public abstract class TrashPolicy extends Configured {
    * not assume trash always under /user/$USER due to HDFS encryption zone.
    * @param conf the configuration to be used
    * @param fs the filesystem to be used
-   * @throws IOException
    */
-  public void initialize(Configuration conf, FileSystem fs) throws IOException{
+  public void initialize(Configuration conf, FileSystem fs) {
     throw new UnsupportedOperationException();
   }
 
@@ -137,8 +136,7 @@ public abstract class TrashPolicy extends Configured {
    * @param fs the file system to be used
    * @return an instance of TrashPolicy
    */
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
-      throws IOException {
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs) {
     Class<? extends TrashPolicy> trashClass = conf.getClass(
         "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
     TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
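
A minimal sketch of the effect at a call site: after this change, obtaining a
TrashPolicy via getInstance no longer forces callers to handle a checked
IOException from that call itself (FileSystem.get below still declares one; the
snippet is illustrative, not code from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TrashPolicy;

public class TrashPolicyCallSite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);   // still declares IOException
    // Neither getInstance nor initialize declares IOException after HADOOP-13700.
    TrashPolicy trash = TrashPolicy.getInstance(conf, fs);
    System.out.println("Trash enabled: " + trash.isEnabled());
  }
}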



[36/51] [abbrv] hadoop git commit: YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)

Posted by ae...@apache.org.
YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6378845f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6378845f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6378845f

Branch: refs/heads/HDFS-7240
Commit: 6378845f9ef789c3fda862c43bcd498aa3f35068
Parents: 7ba7092
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Tue Oct 11 21:41:58 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Tue Oct 11 21:42:08 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 20 ++++++++++++++++----
 .../src/main/resources/yarn-default.xml         |  4 ++--
 .../server/resourcemanager/RMAppManager.java    |  2 +-
 3 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4d43357..3bd0dcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -719,17 +719,29 @@ public class YarnConfiguration extends Configuration {
       + "leveldb-state-store.compaction-interval-secs";
   public static final long DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS = 3600;
 
-  /** The maximum number of completed applications RM keeps. */ 
+  /**
+   * The maximum number of completed applications the RM keeps. Defaults to
+   * {@link #DEFAULT_RM_MAX_COMPLETED_APPLICATIONS}.
+   */
   public static final String RM_MAX_COMPLETED_APPLICATIONS =
     RM_PREFIX + "max-completed-applications";
-  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000;
+  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 1000;
 
   /**
-   * The maximum number of completed applications RM state store keeps, by
-   * default equals to DEFAULT_RM_MAX_COMPLETED_APPLICATIONS
+   * The maximum number of completed applications the RM state store keeps.
+   * Defaults to the value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
    */
   public static final String RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
       RM_PREFIX + "state-store.max-completed-applications";
+  /**
+   * The default value for
+   * {@code yarn.resourcemanager.state-store.max-completed-applications}.
+   * @deprecated This default value is ignored and will be removed in a future
+   * release. The default value of
+   * {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+   * value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
+   */
+  @Deprecated
   public static final int DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
       DEFAULT_RM_MAX_COMPLETED_APPLICATIONS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 524afec..f37c689 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -417,7 +417,7 @@
     the applications remembered in RM memory.
     Any values larger than ${yarn.resourcemanager.max-completed-applications} will
     be reset to ${yarn.resourcemanager.max-completed-applications}.
-    Note that this value impacts the RM recovery performance.Typically,
+    Note that this value impacts the RM recovery performance. Typically,
     a smaller value indicates better performance on RM recovery.
     </description>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
@@ -687,7 +687,7 @@
   <property>
     <description>The maximum number of completed applications RM keeps. </description>
     <name>yarn.resourcemanager.max-completed-applications</name>
-    <value>10000</value>
+    <value>1000</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7352a28..c065b60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -101,7 +101,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     this.maxCompletedAppsInStateStore =
         conf.getInt(
           YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
-          YarnConfiguration.DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS);
+          this.maxCompletedAppsInMemory);
     if (this.maxCompletedAppsInStateStore > this.maxCompletedAppsInMemory) {
       this.maxCompletedAppsInStateStore = this.maxCompletedAppsInMemory;
     }
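
A minimal sketch of the resulting lookup order from the configuration side,
mirroring the RMAppManager change above (the values printed are just the
defaults after this patch; both properties can still be set explicitly in
yarn-site.xml):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class CompletedAppLimits {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();

    // Defaults to 1000 after this change (was 10000).
    int maxInMemory = conf.getInt(
        YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
        YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);

    // The state-store limit now falls back to the in-memory limit
    // instead of its own (deprecated) default.
    int maxInStateStore = conf.getInt(
        YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
        maxInMemory);
    if (maxInStateStore > maxInMemory) {
      maxInStateStore = maxInMemory;
    }

    System.out.println(maxInMemory + " / " + maxInStateStore);
  }
}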



[32/51] [abbrv] hadoop git commit: HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.

Posted by ae...@apache.org.
HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61f0490a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61f0490a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61f0490a

Branch: refs/heads/HDFS-7240
Commit: 61f0490a73085bbaf6639d9234277e59dc1145db
Parents: dacd3ec
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Oct 11 15:55:02 2016 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Oct 11 15:55:02 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSNamesystem.java      |  6 ++
 .../server/namenode/top/metrics/TopMetrics.java | 67 ++++++++++++++++++--
 .../server/namenode/metrics/TestTopMetrics.java | 63 ++++++++++++++++++
 3 files changed, 129 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2471dc8..b9b02ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,6 +89,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -989,6 +990,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // Add audit logger to calculate top users
     if (topConf.isEnabled) {
       topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+      if (DefaultMetricsSystem.instance().getSource(
+          TOPMETRICS_METRICS_SOURCE_NAME) == null) {
+        DefaultMetricsSystem.instance().register(TOPMETRICS_METRICS_SOURCE_NAME,
+            "Top N operations by user", topMetrics);
+      }
       auditLoggers.add(new TopAuditLogger(topMetrics));
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index ab55392..2719c88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.metrics;
 
-import java.net.InetAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.InetAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 
 /**
@@ -58,8 +66,11 @@ import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowMan
  * Thread-safe: relies on thread-safety of RollingWindowManager
  */
 @InterfaceAudience.Private
-public class TopMetrics {
+public class TopMetrics implements MetricsSource {
   public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
+  public static final String TOPMETRICS_METRICS_SOURCE_NAME =
+      "NNTopUserOpCounts";
+  private final boolean isMetricsSourceEnabled;
 
   private static void logConf(Configuration conf) {
     LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY +
@@ -83,6 +94,8 @@ public class TopMetrics {
       rollingWindowManagers.put(reportingPeriods[i], new RollingWindowManager(
           conf, reportingPeriods[i]));
     }
+    isMetricsSourceEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
+        DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
   }
 
   /**
@@ -128,4 +141,44 @@ public class TopMetrics {
           TopConf.ALL_CMDS, userName, 1);
     }
   }
+
+  /**
+   * Flatten out the top window metrics into
+   * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
+   * external metrics systems. Each metrics record added corresponds to the
+   * reporting period a.k.a window length of the configured rolling windows.
+   */
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    if (!isMetricsSourceEnabled) {
+      return;
+    }
+
+    for (final TopWindow window : getTopWindows()) {
+      MetricsRecordBuilder rb = collector.addRecord(buildOpRecordName(window))
+          .setContext("dfs");
+      for (final Op op: window.getOps()) {
+        rb.addCounter(buildOpTotalCountMetricsInfo(op), op.getTotalCount());
+        for (User user : op.getTopUsers()) {
+          rb.addCounter(buildOpRecordMetricsInfo(op, user), user.getCount());
+        }
+      }
+    }
+  }
+
+  private String buildOpRecordName(TopWindow window) {
+    return TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs="
+      + window.getWindowLenMs();
+  }
+
+  private MetricsInfo buildOpTotalCountMetricsInfo(Op op) {
+    return Interns.info("op=" + StringUtils.deleteWhitespace(op.getOpType())
+      + ".TotalCount", "Total operation count");
+  }
+
+  private MetricsInfo buildOpRecordMetricsInfo(Op op, User user) {
+    return Interns.info("op=" + StringUtils.deleteWhitespace(op.getOpType())
+      + ".user=" + user.getUser()
+      + ".count", "Total operations performed by user");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
new file mode 100644
index 0000000..4d3a4f0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
+import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test for MetricsSource part of the {@link TopMetrics} impl.
+ */
+public class TestTopMetrics {
+  @Test
+  public void testPresence() {
+    Configuration conf = new Configuration();
+    TopConf topConf = new TopConf(conf);
+    TopMetrics topMetrics = new TopMetrics(conf,
+        topConf.nntopReportingPeriodsMs);
+    // Dummy command
+    topMetrics.report("test", "listStatus");
+    topMetrics.report("test", "listStatus");
+    topMetrics.report("test", "listStatus");
+
+    MetricsRecordBuilder rb = getMetrics(topMetrics);
+    MetricsCollector mc = rb.parent();
+
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=60000");
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=300000");
+    verify(mc).addRecord(TOPMETRICS_METRICS_SOURCE_NAME + ".windowMs=1500000");
+
+    verify(rb, times(3)).addCounter(Interns.info("op=listStatus.TotalCount",
+        "Total operation count"), 3L);
+    verify(rb, times(3)).addCounter(Interns.info("op=*.TotalCount",
+        "Total operation count"), 3L);
+
+    verify(rb, times(3)).addCounter(Interns.info("op=listStatus." +
+        "user=test.count", "Total operations performed by user"), 3L);
+  }
+}
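
A small standalone sketch of the MetricsSource pattern this patch uses
(ExampleTopSource, its record name and its counter are made up; only the
metrics2 calls mirror TopMetrics and the FSNamesystem registration above):

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

public class ExampleTopSource implements MetricsSource {
  private volatile long listStatusCount = 0;   // hypothetical counter

  public void report() { listStatusCount++; }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // One record per "window", same shape as NNTopUserOpCounts.windowMs=...
    collector.addRecord("ExampleTopSource.windowMs=60000")
        .setContext("dfs")
        .addCounter(Interns.info("op=listStatus.TotalCount",
            "Total operation count"), listStatusCount);
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("test");
    ExampleTopSource source = new ExampleTopSource();
    DefaultMetricsSystem.instance().register(
        "ExampleTopSource", "Example top-N source", source);
    source.report();
  }
}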



[13/51] [abbrv] hadoop git commit: MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to TeraGen and TeraSort. Contributed by Sammi Chen

Posted by ae...@apache.org.
MAPREDUCE-6780. Add support for HDFS directory with erasure code policy to TeraGen and TeraSort. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bea004ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bea004ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bea004ea

Branch: refs/heads/HDFS-7240
Commit: bea004eaeb7ba33bf324ef3e7065cfdd614d8198
Parents: ec0b707
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:33:26 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:33:26 2016 +0600

----------------------------------------------------------------------
 .../hadoop/examples/terasort/TeraGen.java       |  3 +++
 .../examples/terasort/TeraOutputFormat.java     | 20 +++++++++++++++++---
 .../hadoop/examples/terasort/TeraSort.java      |  3 +++
 3 files changed, 23 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 22fe344..7fbb22a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -246,6 +246,9 @@ public class TeraGen extends Configured implements Tool {
 
   private static void usage() throws IOException {
     System.err.println("teragen <num rows> <output dir>");
+    System.err.println("If you want to generate data and store them as " +
+        "erasure code striping file, just make sure that the parent dir " +
+        "of <output dir> has erasure code policy set");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index fd3ea78..73c446d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.examples.terasort;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -40,6 +42,7 @@ import org.apache.hadoop.mapreduce.security.TokenCache;
  * An output format that writes the key and value appended together.
  */
 public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
+  private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
   private OutputCommitter committer = null;
 
   /**
@@ -74,10 +77,22 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
       out.write(key.getBytes(), 0, key.getLength());
       out.write(value.getBytes(), 0, value.getLength());
     }
-    
+
     public void close(TaskAttemptContext context) throws IOException {
       if (finalSync) {
-        out.hsync();
+        try {
+          out.hsync();
+        } catch (UnsupportedOperationException e) {
+          /*
+           * The hsync operation is not yet supported for striped files with
+           * an erasure coding policy set, so swallow the exception here as a
+           * workaround that lets teragen and terasort write to directories
+           * containing striped files. Once hsync is supported on striped
+           * files, this workaround should be removed.
+           */
+          LOG.info("The hsync operation is not supported so far on a path " +
+                  "with an erasure coding policy set");
+        }
       }
       out.close();
     }
@@ -135,5 +150,4 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
     }
     return committer;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea004ea/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 9beff3e..040d13f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -287,6 +287,9 @@ public class TeraSort extends Configured implements Tool {
     for (TeraSortConfigKeys teraSortConfigKeys : TeraSortConfigKeys.values()) {
       System.err.println(teraSortConfigKeys.toString());
     }
+    System.err.println("If you want to store the output data as " +
+        "erasure code striping file, just make sure that the parent dir " +
+        "of <out> has erasure code policy set");
   }
 
   public int run(String[] args) throws Exception {




[19/51] [abbrv] hadoop git commit: HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by Suraj Acharya.

Posted by ae...@apache.org.
HADOOP-13669. KMS Server should log exceptions before throwing. Contributed by Suraj Acharya.
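The diff applies the same mechanical pattern to every KMS REST method: wrap the existing body in a try block, log the exception at DEBUG together with the method name, and rethrow it unchanged so the existing exception-to-response mapping still applies. A minimal sketch of the pattern follows; it uses a hypothetical helper and slf4j, whereas the real KMS code inlines the try/catch in each method:

import java.util.concurrent.Callable;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Schematic of the "log before throwing" pattern this patch applies to every
 * KMS REST method. This is a sketch only: the real KMS code inlines the
 * try/catch in each method instead of going through a helper like this.
 */
public final class LogBeforeThrow {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogBeforeThrow.class);

  private LogBeforeThrow() {
  }

  static <T> T logAndRethrow(String method, Callable<T> body) throws Exception {
    try {
      return body.call();
    } catch (Exception e) {
      // Record the failure on the server side at DEBUG, then rethrow the
      // exception unchanged so the existing error handling still drives the
      // HTTP response sent back to the client.
      LOG.debug("Exception in " + method + ".", e);
      throw e;
    }
  }

  public static void main(String[] args) throws Exception {
    // Hypothetical usage: wrap a request handler body.
    System.out.println(logAndRethrow("getKeyNames", () -> "ok"));
  }
}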


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65912e40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65912e40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65912e40

Branch: refs/heads/HDFS-7240
Commit: 65912e4027548868ebefd8ee36eb00fa889704a7
Parents: 0306007
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Oct 10 12:49:19 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Oct 10 12:51:12 2016 -0700

----------------------------------------------------------------------
 .../hadoop/crypto/key/kms/server/KMS.java       | 711 ++++++++++---------
 1 file changed, 392 insertions(+), 319 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65912e40/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 371f3f5..d8755ec 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -104,89 +104,101 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   @SuppressWarnings("unchecked")
   public Response createKey(Map jsonKey) throws Exception {
-    LOG.trace("Entering createKey Method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
-    KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
-    assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
-    String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
-    final String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
-    int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
-                 ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
-    String description = (String)
-        jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
-    LOG.debug("Creating key with name {}, cipher being used{}, " +
-            "length of key {}, description of key {}", name, cipher,
-             length, description);
-    Map<String, String> attributes = (Map<String, String>)
-        jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
-    if (material != null) {
-      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
-          KMSOp.CREATE_KEY, name);
-    }
-    final KeyProvider.Options options = new KeyProvider.Options(
-        KMSWebApp.getConfiguration());
-    if (cipher != null) {
-      options.setCipher(cipher);
-    }
-    if (length != 0) {
-      options.setBitLength(length);
-    }
-    options.setDescription(description);
-    options.setAttributes(attributes);
-
-    KeyProvider.KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            KeyProvider.KeyVersion keyVersion = (material != null)
-              ? provider.createKey(name, Base64.decodeBase64(material), options)
-              : provider.createKey(name, options);
-            provider.flush();
-            return keyVersion;
+    try {
+      LOG.trace("Entering createKey Method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
+      KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
+      assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
+      String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
+      final String material;
+      material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
+      int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
+                   ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
+      String description = (String)
+          jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+      LOG.debug("Creating key with name {}, cipher being used{}, " +
+              "length of key {}, description of key {}", name, cipher,
+               length, description);
+      Map<String, String> attributes = (Map<String, String>)
+          jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
+      if (material != null) {
+        assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+            KMSOp.CREATE_KEY, name);
+      }
+      final KeyProvider.Options options = new KeyProvider.Options(
+          KMSWebApp.getConfiguration());
+      if (cipher != null) {
+        options.setCipher(cipher);
+      }
+      if (length != 0) {
+        options.setBitLength(length);
+      }
+      options.setDescription(description);
+      options.setAttributes(attributes);
+
+      KeyProvider.KeyVersion keyVersion = user.doAs(
+          new PrivilegedExceptionAction<KeyVersion>() {
+            @Override
+            public KeyVersion run() throws Exception {
+              KeyProvider.KeyVersion keyVersion = (material != null)
+                  ? provider.createKey(name, Base64.decodeBase64(material),
+                      options)
+                  : provider.createKey(name, options);
+              provider.flush();
+              return keyVersion;
+            }
           }
-        }
-    );
+      );
 
-    kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
-        (material != null) + " Description:" + description);
+      kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
+          (material != null) + " Description:" + description);
 
-    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
-      keyVersion = removeKeyMaterial(keyVersion);
+      if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
+        keyVersion = removeKeyMaterial(keyVersion);
+      }
+      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      String requestURL = KMSMDCFilter.getURL();
+      int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
+      requestURL = requestURL.substring(0, idx);
+      LOG.trace("Exiting createKey Method.");
+      return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
+          .type(MediaType.APPLICATION_JSON)
+          .header("Location", getKeyURI(requestURL, name)).entity(json).build();
+    } catch (Exception e) {
+      LOG.debug("Exception in createKey.", e);
+      throw e;
     }
-    Map json = KMSServerJSONUtils.toJSON(keyVersion);
-    String requestURL = KMSMDCFilter.getURL();
-    int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
-    requestURL = requestURL.substring(0, idx);
-    LOG.trace("Exiting createKey Method.");
-    return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
-        .type(MediaType.APPLICATION_JSON)
-        .header("Location", getKeyURI(requestURL, name)).entity(json).build();
   }
 
   @DELETE
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response deleteKey(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering deleteKey method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
-    KMSClientProvider.checkNotEmpty(name, "name");
-    LOG.debug("Deleting key with name {}.", name);
-    user.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        provider.deleteKey(name);
-        provider.flush();
-        return null;
-      }
-    });
-
-    kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
-    LOG.trace("Exiting deleteKey method.");
-    return Response.ok().build();
+    try {
+      LOG.trace("Entering deleteKey method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
+      KMSClientProvider.checkNotEmpty(name, "name");
+      LOG.debug("Deleting key with name {}.", name);
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          provider.deleteKey(name);
+          provider.flush();
+          return null;
+        }
+      });
+
+      kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
+      LOG.trace("Exiting deleteKey method.");
+      return Response.ok().build();
+    } catch (Exception e) {
+      LOG.debug("Exception in deleteKey.", e);
+      throw e;
+    }
   }
 
   @POST
@@ -195,41 +207,49 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response rolloverKey(@PathParam("name") final String name,
       Map jsonMaterial) throws Exception {
-    LOG.trace("Entering rolloverKey Method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
-    KMSClientProvider.checkNotEmpty(name, "name");
-    LOG.debug("Rolling key with name {}.", name);
-    final String material = (String)
-        jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
-    if (material != null) {
-      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
-          KMSOp.ROLL_NEW_VERSION, name);
-    }
+    try {
+      LOG.trace("Entering rolloverKey Method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
+      KMSClientProvider.checkNotEmpty(name, "name");
+      LOG.debug("Rolling key with name {}.", name);
+      final String material = (String)
+              jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
+      if (material != null) {
+        assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
+                KMSOp.ROLL_NEW_VERSION, name);
+      }
 
-    KeyProvider.KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            KeyVersion keyVersion = (material != null)
-              ? provider.rollNewVersion(name, Base64.decodeBase64(material))
-              : provider.rollNewVersion(name);
-            provider.flush();
-            return keyVersion;
-          }
-        }
-    );
+      KeyProvider.KeyVersion keyVersion = user.doAs(
+              new PrivilegedExceptionAction<KeyVersion>() {
+              @Override
+                public KeyVersion run() throws Exception {
+                KeyVersion keyVersion = (material != null)
+                        ? provider.rollNewVersion(name,
+                        Base64.decodeBase64(material))
+                        : provider.rollNewVersion(name);
+                provider.flush();
+                return keyVersion;
+              }
+            }
+      );
 
-    kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
-        (material != null) + " NewVersion:" + keyVersion.getVersionName());
+      kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+              (material != null) +
+              " NewVersion:" + keyVersion.getVersionName());
 
-    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
-      keyVersion = removeKeyMaterial(keyVersion);
+      if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
+        keyVersion = removeKeyMaterial(keyVersion);
+      }
+      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      LOG.trace("Exiting rolloverKey Method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in rolloverKey.", e);
+      throw e;
     }
-    Map json = KMSServerJSONUtils.toJSON(keyVersion);
-    LOG.trace("Exiting rolloverKey Method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
   @GET
@@ -237,59 +257,76 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
       List<String> keyNamesList) throws Exception {
-    LOG.trace("Entering getKeysMetadata method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    final String[] keyNames = keyNamesList.toArray(
-        new String[keyNamesList.size()]);
-    assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
-
-    KeyProvider.Metadata[] keysMeta = user.doAs(
-        new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
-          @Override
-          public KeyProvider.Metadata[] run() throws Exception {
-            return provider.getKeysMetadata(keyNames);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeysMetadata method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      final String[] keyNames = keyNamesList.toArray(
+              new String[keyNamesList.size()]);
+      assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
+
+      KeyProvider.Metadata[] keysMeta = user.doAs(
+              new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
+              @Override
+                public KeyProvider.Metadata[] run() throws Exception {
+                return provider.getKeysMetadata(keyNames);
+              }
+            }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
-    kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
-    LOG.trace("Exiting getKeysMetadata method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
+      kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
+      LOG.trace("Exiting getKeysMetadata method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeysmetadata.", e);
+      throw e;
+    }
   }
 
   @GET
   @Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyNames() throws Exception {
-    LOG.trace("Entering getKeyNames method.");
-    KMSWebApp.getAdminCallsMeter().mark();
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
-
-    List<String> json = user.doAs(
-        new PrivilegedExceptionAction<List<String>>() {
-          @Override
-          public List<String> run() throws Exception {
-            return provider.getKeys();
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyNames method.");
+      KMSWebApp.getAdminCallsMeter().mark();
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
+
+      List<String> json = user.doAs(
+              new PrivilegedExceptionAction<List<String>>() {
+              @Override
+                public List<String> run() throws Exception {
+                return provider.getKeys();
+              }
+            }
+      );
 
-    kmsAudit.ok(user, KMSOp.GET_KEYS, "");
-    LOG.trace("Exiting getKeyNames method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      kmsAudit.ok(user, KMSOp.GET_KEYS, "");
+      LOG.trace("Exiting getKeyNames method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getkeyNames.", e);
+      throw e;
+    }
   }
 
   @GET
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response getKey(@PathParam("name") String name)
       throws Exception {
-    LOG.trace("Entering getKey method.");
-    LOG.debug("Getting key information for key with name {}.", name);
-    LOG.trace("Exiting getKey method.");
-    return getMetadata(name);
+    try {
+      LOG.trace("Entering getKey method.");
+      LOG.debug("Getting key information for key with name {}.", name);
+      LOG.trace("Exiting getKey method.");
+      return getMetadata(name);
+    } catch (Exception e) {
+      LOG.debug("Exception in getKey.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -298,26 +335,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getMetadata(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getMetadata method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getAdminCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
-    LOG.debug("Getting metadata for key with name {}.", name);
-
-    KeyProvider.Metadata metadata = user.doAs(
-        new PrivilegedExceptionAction<KeyProvider.Metadata>() {
-          @Override
-          public KeyProvider.Metadata run() throws Exception {
-            return provider.getMetadata(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getMetadata method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getAdminCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
+      LOG.debug("Getting metadata for key with name {}.", name);
+
+      KeyProvider.Metadata metadata = user.doAs(
+              new PrivilegedExceptionAction<KeyProvider.Metadata>() {
+              @Override
+                public KeyProvider.Metadata run() throws Exception {
+                return provider.getMetadata(name);
+              }
+            }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(name, metadata);
-    kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
-    LOG.trace("Exiting getMetadata method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(name, metadata);
+      kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
+      LOG.trace("Exiting getMetadata method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getMetadata.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -326,26 +369,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getCurrentVersion(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getCurrentVersion method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
-    LOG.debug("Getting key version for key with name {}.", name);
-
-    KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            return provider.getCurrentKey(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getCurrentVersion method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
+      LOG.debug("Getting key version for key with name {}.", name);
+
+      KeyVersion keyVersion = user.doAs(
+              new PrivilegedExceptionAction<KeyVersion>() {
+              @Override
+                public KeyVersion run() throws Exception {
+                return provider.getCurrentKey(name);
+            }
+            }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(keyVersion);
-    kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
-    LOG.trace("Exiting getCurrentVersion method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
+      LOG.trace("Exiting getCurrentVersion method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getCurrentVersion.", e);
+      throw e;
+    }
   }
 
   @GET
@@ -353,28 +402,34 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersion(
       @PathParam("versionName") final String versionName) throws Exception {
-    LOG.trace("Entering getKeyVersion method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(versionName, "versionName");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
-    LOG.debug("Getting key with version name {}.", versionName);
-
-    KeyVersion keyVersion = user.doAs(
-        new PrivilegedExceptionAction<KeyVersion>() {
-          @Override
-          public KeyVersion run() throws Exception {
-            return provider.getKeyVersion(versionName);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyVersion method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(versionName, "versionName");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
+      LOG.debug("Getting key with version name {}.", versionName);
+
+      KeyVersion keyVersion = user.doAs(
+              new PrivilegedExceptionAction<KeyVersion>() {
+              @Override
+                public KeyVersion run() throws Exception {
+                return provider.getKeyVersion(versionName);
+              }
+            }
+      );
 
-    if (keyVersion != null) {
-      kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
+      if (keyVersion != null) {
+        kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
+      }
+      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      LOG.trace("Exiting getKeyVersion method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeyVersion.", e);
+      throw e;
     }
-    Object json = KMSServerJSONUtils.toJSON(keyVersion);
-    LOG.trace("Exiting getKeyVersion method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
   @SuppressWarnings({ "rawtypes", "unchecked" })
@@ -388,60 +443,65 @@ public class KMS {
           @DefaultValue("1")
           @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys)
           throws Exception {
-    LOG.trace("Entering generateEncryptedKeys method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSClientProvider.checkNotNull(edekOp, "eekOp");
-    LOG.debug("Generating encrypted key with name {}," +
-            " the edek Operation is {}.", name, edekOp);
-
-    Object retJSON;
-    if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
-      LOG.debug("edek Operation is Generate.");
-      assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
-
-      final List<EncryptedKeyVersion> retEdeks =
-          new LinkedList<EncryptedKeyVersion>();
-      try {
-
-        user.doAs(
-            new PrivilegedExceptionAction<Void>() {
-              @Override
-              public Void run() throws Exception {
-                  LOG.debug("Generated Encrypted key for {} number of keys.",
-                          numKeys);
-                for (int i = 0; i < numKeys; i++) {
-                  retEdeks.add(provider.generateEncryptedKey(name));
+    try {
+      LOG.trace("Entering generateEncryptedKeys method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSClientProvider.checkNotNull(edekOp, "eekOp");
+      LOG.debug("Generating encrypted key with name {}," +
+              " the edek Operation is {}.", name, edekOp);
+
+      Object retJSON;
+      if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
+        LOG.debug("edek Operation is Generate.");
+        assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
+
+        final List<EncryptedKeyVersion> retEdeks =
+                new LinkedList<EncryptedKeyVersion>();
+        try {
+
+          user.doAs(
+                  new PrivilegedExceptionAction<Void>() {
+                  @Override
+                    public Void run() throws Exception {
+                    LOG.debug("Generated Encrypted key for {} number of " +
+                              "keys.", numKeys);
+                    for (int i = 0; i < numKeys; i++) {
+                      retEdeks.add(provider.generateEncryptedKey(name));
+                    }
+                    return null;
+                  }
                 }
-                return null;
-              }
-            }
-        );
+          );
 
-      } catch (Exception e) {
-        LOG.error("Exception in generateEncryptedKeys:", e);
-        throw new IOException(e);
-      }
-      kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
-      retJSON = new ArrayList();
-      for (EncryptedKeyVersion edek : retEdeks) {
-        ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
+        } catch (Exception e) {
+          LOG.error("Exception in generateEncryptedKeys:", e);
+          throw new IOException(e);
+        }
+        kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
+        retJSON = new ArrayList();
+        for (EncryptedKeyVersion edek : retEdeks) {
+          ((ArrayList) retJSON).add(KMSServerJSONUtils.toJSON(edek));
+        }
+      } else {
+        StringBuilder error;
+        error = new StringBuilder("IllegalArgumentException Wrong ");
+        error.append(KMSRESTConstants.EEK_OP);
+        error.append(" value, it must be ");
+        error.append(KMSRESTConstants.EEK_GENERATE);
+        error.append(" or ");
+        error.append(KMSRESTConstants.EEK_DECRYPT);
+        LOG.error(error.toString());
+        throw new IllegalArgumentException(error.toString());
       }
-    } else {
-      StringBuilder error;
-      error = new StringBuilder("IllegalArgumentException Wrong ");
-      error.append(KMSRESTConstants.EEK_OP);
-      error.append(" value, it must be ");
-      error.append(KMSRESTConstants.EEK_GENERATE);
-      error.append(" or ");
-      error.append(KMSRESTConstants.EEK_DECRYPT);
-      LOG.error(error.toString());
-      throw new IllegalArgumentException(error.toString());
+      KMSWebApp.getGenerateEEKCallsMeter().mark();
+      LOG.trace("Exiting generateEncryptedKeys method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in generateEncryptedKeys.", e);
+      throw e;
     }
-    KMSWebApp.getGenerateEEKCallsMeter().mark();
-    LOG.trace("Exiting generateEncryptedKeys method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
-        .build();
   }
 
   @SuppressWarnings("rawtypes")
@@ -454,57 +514,64 @@ public class KMS {
       @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
       Map jsonPayload)
       throws Exception {
-    LOG.trace("Entering decryptEncryptedKey method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(versionName, "versionName");
-    KMSClientProvider.checkNotNull(eekOp, "eekOp");
-    LOG.debug("Decrypting key for {}, the edek Operation is {}.",
-            versionName, eekOp);
-
-    final String keyName = (String) jsonPayload.get(
-        KMSRESTConstants.NAME_FIELD);
-    String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
-    String encMaterialStr =
-        (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
-    Object retJSON;
-    if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
-      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
-      KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
-      final byte[] iv = Base64.decodeBase64(ivStr);
-      KMSClientProvider.checkNotNull(encMaterialStr,
-          KMSRESTConstants.MATERIAL_FIELD);
-      final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
-
-      KeyProvider.KeyVersion retKeyVersion = user.doAs(
-          new PrivilegedExceptionAction<KeyVersion>() {
-            @Override
-            public KeyVersion run() throws Exception {
-              return provider.decryptEncryptedKey(
-                  new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
-                      versionName, iv, KeyProviderCryptoExtension.EEK,
-                      encMaterial)
-              );
-            }
-          }
-      );
+    try {
+      LOG.trace("Entering decryptEncryptedKey method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(versionName, "versionName");
+      KMSClientProvider.checkNotNull(eekOp, "eekOp");
+      LOG.debug("Decrypting key for {}, the edek Operation is {}.",
+              versionName, eekOp);
+
+      final String keyName = (String) jsonPayload.get(
+              KMSRESTConstants.NAME_FIELD);
+      String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
+      String encMaterialStr =
+              (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
+      Object retJSON;
+      if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
+        assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK,
+                keyName);
+        KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
+        final byte[] iv = Base64.decodeBase64(ivStr);
+        KMSClientProvider.checkNotNull(encMaterialStr,
+                KMSRESTConstants.MATERIAL_FIELD);
+        final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
+
+        KeyProvider.KeyVersion retKeyVersion = user.doAs(
+                new PrivilegedExceptionAction<KeyVersion>() {
+                @Override
+                  public KeyVersion run() throws Exception {
+                  return provider.decryptEncryptedKey(
+                            new KMSClientProvider.KMSEncryptedKeyVersion(
+                                    keyName, versionName, iv,
+                                            KeyProviderCryptoExtension.EEK,
+                                            encMaterial)
+                    );
+                }
+              }
+        );
 
-      retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
-      kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
-    } else {
-      StringBuilder error;
-      error = new StringBuilder("IllegalArgumentException Wrong ");
-      error.append(KMSRESTConstants.EEK_OP);
-      error.append(" value, it must be ");
-      error.append(KMSRESTConstants.EEK_GENERATE);
-      error.append(" or ");
-      error.append(KMSRESTConstants.EEK_DECRYPT);
-      LOG.error(error.toString());
-      throw new IllegalArgumentException(error.toString());
+        retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
+        kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
+      } else {
+        StringBuilder error;
+        error = new StringBuilder("IllegalArgumentException Wrong ");
+        error.append(KMSRESTConstants.EEK_OP);
+        error.append(" value, it must be ");
+        error.append(KMSRESTConstants.EEK_GENERATE);
+        error.append(" or ");
+        error.append(KMSRESTConstants.EEK_DECRYPT);
+        LOG.error(error.toString());
+        throw new IllegalArgumentException(error.toString());
+      }
+      KMSWebApp.getDecryptEEKCallsMeter().mark();
+      LOG.trace("Exiting decryptEncryptedKey method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in decryptEncryptedKey.", e);
+      throw e;
     }
-    KMSWebApp.getDecryptEEKCallsMeter().mark();
-    LOG.trace("Exiting decryptEncryptedKey method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
-        .build();
   }
 
   @GET
@@ -513,26 +580,32 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersions(@PathParam("name") final String name)
       throws Exception {
-    LOG.trace("Entering getKeyVersions method.");
-    UserGroupInformation user = HttpUserGroupInformation.get();
-    KMSClientProvider.checkNotEmpty(name, "name");
-    KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
-    LOG.debug("Getting key versions for key {}", name);
-
-    List<KeyVersion> ret = user.doAs(
-        new PrivilegedExceptionAction<List<KeyVersion>>() {
-          @Override
-          public List<KeyVersion> run() throws Exception {
-            return provider.getKeyVersions(name);
-          }
-        }
-    );
+    try {
+      LOG.trace("Entering getKeyVersions method.");
+      UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSClientProvider.checkNotEmpty(name, "name");
+      KMSWebApp.getKeyCallsMeter().mark();
+      assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
+      LOG.debug("Getting key versions for key {}", name);
+
+      List<KeyVersion> ret = user.doAs(
+              new PrivilegedExceptionAction<List<KeyVersion>>() {
+              @Override
+                public List<KeyVersion> run() throws Exception {
+                return provider.getKeyVersions(name);
+              }
+            }
+      );
 
-    Object json = KMSServerJSONUtils.toJSON(ret);
-    kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
-    LOG.trace("Exiting getKeyVersions method.");
-    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
+      Object json = KMSServerJSONUtils.toJSON(ret);
+      kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
+      LOG.trace("Exiting getKeyVersions method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
+              .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in getKeyVersions.", e);
+      throw e;
+    }
   }
 
 }




[05/51] [abbrv] hadoop git commit: HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text from public constants. Contributed by Xiao Chen.

Posted by ae...@apache.org.
HADOOP-13627. Have an explicit KerberosAuthException for UGI to throw, text from public constants. Contributed by Xiao Chen.
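KerberosAuthException extends IOException, so existing callers keep working, while callers that want the extra context can catch it explicitly and read the structured fields instead of parsing a concatenated message. A small sketch of such hypothetical client code (the principal and keytab path below are made up for illustration):

import java.io.IOException;

import org.apache.hadoop.security.KerberosAuthException;
import org.apache.hadoop.security.UserGroupInformation;

/**
 * Hypothetical client code showing how the new exception type can be used.
 */
public class KerberosLoginExample {
  public static void main(String[] args) {
    try {
      UserGroupInformation.loginUserFromKeytab(
          "hdfs/host.example.com@EXAMPLE.COM",
          "/etc/security/keytabs/hdfs.keytab");
    } catch (KerberosAuthException kae) {
      // The new exception carries structured context (user, principal,
      // keytab, ticket cache) instead of only a concatenated message.
      System.err.println("Kerberos login failed for user " + kae.getUser()
          + " using keytab " + kae.getKeytabFile());
    } catch (IOException ioe) {
      System.err.println("Non-Kerberos I/O failure: " + ioe);
    }
  }
}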


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e853be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e853be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e853be6

Branch: refs/heads/HDFS-7240
Commit: 2e853be6577a5b98fd860e6d64f89ca6d160514a
Parents: 3565c9a
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Oct 7 13:46:27 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Oct 7 13:46:27 2016 -0700

----------------------------------------------------------------------
 .../hadoop/security/KerberosAuthException.java  | 118 +++++++++++++++++++
 .../hadoop/security/UGIExceptionMessages.java   |  46 ++++++++
 .../hadoop/security/UserGroupInformation.java   |  74 +++++++-----
 3 files changed, 209 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
new file mode 100644
index 0000000..811c7c9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosAuthException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import static org.apache.hadoop.security.UGIExceptionMessages.*;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Thrown when {@link UserGroupInformation} failed with an unrecoverable error,
+ * such as failure in kerberos login/logout, invalid subject etc.
+ *
+ * Caller should not retry when catching this exception.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class KerberosAuthException extends IOException {
+  static final long serialVersionUID = 31L;
+
+  private String user;
+  private String principal;
+  private String keytabFile;
+  private String ticketCacheFile;
+  private String initialMessage;
+
+  public KerberosAuthException(String msg) {
+    super(msg);
+  }
+
+  public KerberosAuthException(Throwable cause) {
+    super(cause);
+  }
+
+  public KerberosAuthException(String initialMsg, Throwable cause) {
+    this(cause);
+    initialMessage = initialMsg;
+  }
+
+  public void setUser(final String u) {
+    user = u;
+  }
+
+  public void setPrincipal(final String p) {
+    principal = p;
+  }
+
+  public void setKeytabFile(final String k) {
+    keytabFile = k;
+  }
+
+  public void setTicketCacheFile(final String t) {
+    ticketCacheFile = t;
+  }
+
+  /** @return The initial message, or null if not set. */
+  public String getInitialMessage() {
+    return initialMessage;
+  }
+
+  /** @return The keytab file path, or null if not set. */
+  public String getKeytabFile() {
+    return keytabFile;
+  }
+
+  /** @return The principal, or null if not set. */
+  public String getPrincipal() {
+    return principal;
+  }
+
+  /** @return The ticket cache file path, or null if not set. */
+  public String getTicketCacheFile() {
+    return ticketCacheFile;
+  }
+
+  /** @return The user, or null if not set. */
+  public String getUser() {
+    return user;
+  }
+
+  @Override
+  public String getMessage() {
+    final StringBuilder sb = new StringBuilder();
+    if (initialMessage != null) {
+      sb.append(initialMessage);
+    }
+    if (user != null) {
+      sb.append(FOR_USER + user);
+    }
+    if (principal != null) {
+      sb.append(FOR_PRINCIPAL + principal);
+    }
+    if (keytabFile != null) {
+      sb.append(FROM_KEYTAB + keytabFile);
+    }
+    if (ticketCacheFile != null) {
+      sb.append(USING_TICKET_CACHE_FILE + ticketCacheFile);
+    }
+    sb.append(" " + super.getMessage());
+    return sb.toString();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
new file mode 100644
index 0000000..c4d30e5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UGIExceptionMessages.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+/**
+ * Standard strings to use in exception messages
+ * in {@link KerberosAuthException} when throwing.
+ */
+final class UGIExceptionMessages {
+
+  public static final String FAILURE_TO_LOGIN = "failure to login:";
+  public static final String FOR_USER = " for user: ";
+  public static final String FOR_PRINCIPAL = " for principal: ";
+  public static final String FROM_KEYTAB = " from keytab ";
+  public static final String LOGIN_FAILURE = "Login failure";
+  public static final String LOGOUT_FAILURE = "Logout failure";
+  public static final String MUST_FIRST_LOGIN =
+      "login must be done first";
+  public static final String MUST_FIRST_LOGIN_FROM_KEYTAB =
+      "loginUserFromKeyTab must be done first";
+  public static final String SUBJECT_MUST_CONTAIN_PRINCIPAL =
+      "Provided Subject must contain a KerberosPrincipal";
+  public static final String SUBJECT_MUST_NOT_BE_NULL =
+      "Subject must not be null";
+  public static final String USING_TICKET_CACHE_FILE =
+      " using ticket cache file: ";
+
+  //checkstyle: Utility classes should not have a public or default constructor.
+  private UGIExceptionMessages() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853be6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 637e3fa..329859d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_MET
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
+import static org.apache.hadoop.security.UGIExceptionMessages.*;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import java.io.File;
@@ -755,8 +756,11 @@ public class UserGroupInformation {
       ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
       return ugi;
     } catch (LoginException le) {
-      throw new IOException("failure to login using ticket cache file " +
-          ticketCache, le);
+      KerberosAuthException kae =
+          new KerberosAuthException(FAILURE_TO_LOGIN, le);
+      kae.setUser(user);
+      kae.setTicketCacheFile(ticketCache);
+      throw kae;
     }
   }
 
@@ -765,16 +769,17 @@ public class UserGroupInformation {
    *
    * @param subject             The KerberosPrincipal to use in UGI
    *
-   * @throws IOException        if the kerberos login fails
+   * @throws IOException
+   * @throws KerberosAuthException if the kerberos login fails
    */
   public static UserGroupInformation getUGIFromSubject(Subject subject)
       throws IOException {
     if (subject == null) {
-      throw new IOException("Subject must not be null");
+      throw new KerberosAuthException(SUBJECT_MUST_NOT_BE_NULL);
     }
 
     if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) {
-      throw new IOException("Provided Subject must contain a KerberosPrincipal");
+      throw new KerberosAuthException(SUBJECT_MUST_CONTAIN_PRINCIPAL);
     }
 
     KerberosPrincipal principal =
@@ -894,7 +899,7 @@ public class UserGroupInformation {
       loginUser.spawnAutoRenewalThreadForUserCreds();
     } catch (LoginException le) {
       LOG.debug("failure to login", le);
-      throw new IOException("failure to login: " + le, le);
+      throw new KerberosAuthException(FAILURE_TO_LOGIN, le);
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("UGI loginUser:"+loginUser);
@@ -1001,7 +1006,8 @@ public class UserGroupInformation {
    * file and logs them in. They become the currently logged-in user.
    * @param user the principal name to load from the keytab
    * @param path the path to the keytab file
-   * @throws IOException if the keytab file can't be read
+   * @throws IOException
+   * @throws KerberosAuthException if it's a kerberos login exception.
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
@@ -1030,8 +1036,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + user + " from keytab " + 
-                            path+ ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(user);
+      kae.setKeytabFile(path);
+      throw kae;
     }
     LOG.info("Login successful for user " + keytabPrincipal
         + " using keytab file " + keytabFile);
@@ -1042,8 +1050,9 @@ public class UserGroupInformation {
    * This method assumes that the user logged in by calling
    * {@link #loginUserFromKeytab(String, String)}.
    *
-   * @throws IOException if a failure occurred in logout, or if the user did
-   * not log in by invoking loginUserFromKeyTab() before.
+   * @throws IOException
+   * @throws KerberosAuthException if a failure occurred in logout,
+   * or if the user did not log in by invoking loginUserFromKeyTab() before.
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
@@ -1054,7 +1063,7 @@ public class UserGroupInformation {
     }
     LoginContext login = getLogin();
     if (login == null || keytabFile == null) {
-      throw new IOException("loginUserFromKeytab must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
 
     try {
@@ -1065,9 +1074,10 @@ public class UserGroupInformation {
         login.logout();
       }
     } catch (LoginException le) {
-      throw new IOException("Logout failure for " + user + " from keytab " +
-          keytabFile + ": " + le,
-          le);
+      KerberosAuthException kae = new KerberosAuthException(LOGOUT_FAILURE, le);
+      kae.setUser(user.toString());
+      kae.setKeytabFile(keytabFile);
+      throw kae;
     }
 
     LOG.info("Logout successful for user " + keytabPrincipal
@@ -1078,6 +1088,7 @@ public class UserGroupInformation {
    * Re-login a user from keytab if TGT is expired or is close to expiry.
    * 
    * @throws IOException
+   * @throws KerberosAuthException if it's a kerberos login exception.
    */
   public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
     if (!isSecurityEnabled()
@@ -1099,12 +1110,12 @@ public class UserGroupInformation {
    * happened already.
    * The Subject field of this UserGroupInformation object is updated to have
    * the new credentials.
-   * @throws IOException on a failure
+   * @throws IOException
+   * @throws KerberosAuthException on a failure
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromKeytab()
-  throws IOException {
+  public synchronized void reloginFromKeytab() throws IOException {
     if (!isSecurityEnabled() ||
          user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
          !isKeytab)
@@ -1124,7 +1135,7 @@ public class UserGroupInformation {
     
     LoginContext login = getLogin();
     if (login == null || keytabFile == null) {
-      throw new IOException("loginUserFromKeyTab must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
     
     long start = 0;
@@ -1156,8 +1167,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + keytabPrincipal + 
-          " from keytab " + keytabFile + ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setPrincipal(keytabPrincipal);
+      kae.setKeytabFile(keytabFile);
+      throw kae;
     } 
   }
 
@@ -1166,19 +1179,19 @@ public class UserGroupInformation {
    * method assumes that login had happened already.
    * The Subject field of this UserGroupInformation object is updated to have
    * the new credentials.
-   * @throws IOException on a failure
+   * @throws IOException
+   * @throws KerberosAuthException on a failure
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromTicketCache()
-  throws IOException {
+  public synchronized void reloginFromTicketCache() throws IOException {
     if (!isSecurityEnabled() || 
         user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
         !isKrbTkt)
       return;
     LoginContext login = getLogin();
     if (login == null) {
-      throw new IOException("login must be done first");
+      throw new KerberosAuthException(MUST_FIRST_LOGIN);
     }
     long now = Time.now();
     if (!hasSufficientTimeElapsed(now)) {
@@ -1205,8 +1218,9 @@ public class UserGroupInformation {
       login.login();
       setLogin(login);
     } catch (LoginException le) {
-      throw new IOException("Login failure for " + getUserName() + ": " + le,
-          le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(getUserName());
+      throw kae;
     } 
   }
 
@@ -1252,8 +1266,10 @@ public class UserGroupInformation {
       if (start > 0) {
         metrics.loginFailure.add(Time.now() - start);
       }
-      throw new IOException("Login failure for " + user + " from keytab " + 
-                            path + ": " + le, le);
+      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
+      kae.setUser(user);
+      kae.setKeytabFile(path);
+      throw kae;
     } finally {
       if(oldKeytabFile != null) keytabFile = oldKeytabFile;
       if(oldKeytabPrincipal != null) keytabPrincipal = oldKeytabPrincipal;
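
A quick, hedged illustration of what this refactor buys callers (the principal and keytab path below are made up): KerberosAuthException is still thrown from methods declared as "throws IOException", so existing callers keep compiling, but they can now tell Kerberos login problems apart from generic I/O failures by catching the subclass first.

    import java.io.IOException;
    import org.apache.hadoop.security.KerberosAuthException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      public static void main(String[] args) {
        try {
          // Hypothetical principal and keytab path, for illustration only.
          UserGroupInformation.loginUserFromKeytab(
              "hdfs/host.example.com@EXAMPLE.COM",
              "/etc/security/keytabs/hdfs.keytab");
        } catch (KerberosAuthException kae) {
          // Kerberos-specific failure (bad keytab, wrong principal, expired TGT, ...);
          // the exception carries structured fields filled in via setUser()/setKeytabFile().
          System.err.println("Kerberos login failed: " + kae);
        } catch (IOException ioe) {
          // Any other I/O problem still surfaces through the unchanged IOException contract.
          System.err.println("Login failed: " + ioe);
        }
      }
    }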



[31/51] [abbrv] hadoop git commit: HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.

Posted by ae...@apache.org.
HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dacd3ec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dacd3ec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dacd3ec6

Branch: refs/heads/HDFS-7240
Commit: dacd3ec66b111be24131957c986f0c748cf9ea26
Parents: 8a09bf7
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Oct 11 15:07:14 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Oct 11 15:07:14 2016 -0700

----------------------------------------------------------------------
 .../src/main/native/libhdfs/include/hdfs/hdfs.h                     | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dacd3ec6/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index c856928..83c1c59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -493,6 +493,7 @@ extern  "C" {
      *         complete before proceeding with further file updates.
      *         -1 on error.
      */
+    LIBHDFS_EXTERNAL
     int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
 
     /**



[44/51] [abbrv] hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

Posted by ae...@apache.org.
HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9097e2ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9097e2ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9097e2ef

Branch: refs/heads/HDFS-7240
Commit: 9097e2efe4c92d83c8fab88dc11be84505a6cab5
Parents: b371c56
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Oct 13 10:52:28 2016 -0700

----------------------------------------------------------------------
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9097e2ef/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
-      final String serverName = InetAddress.getByName(request.getServerName())
-                                           .getCanonicalHostName();
       try {
         token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
             GSSContext gssContext = null;
             GSSCredential gssCreds = null;
             try {
-              gssCreds = gssManager.createCredential(
-                  gssManager.createName(
-                      KerberosUtil.getServicePrincipal("HTTP", serverName),
-                      KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+              gssCreds = gssManager.createCredential(null,
                   GSSCredential.INDEFINITE_LIFETIME,
                   new Oid[]{
                     KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
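
For context, a minimal hedged sketch of the server-side GSS-API pattern this change relies on; it is not the handler code itself, and it assumes the call runs inside Subject.doAs(...) with the service's keytab-backed Subject, and that clientToken already holds the base64-decoded bytes of the Negotiate header. Passing a null GSSName to createCredential lets the GSS layer accept with whichever HTTP/... principal from the keytab matches the incoming token, rather than an SPN rebuilt from the client-supplied server name.

    import org.ietf.jgss.GSSContext;
    import org.ietf.jgss.GSSCredential;
    import org.ietf.jgss.GSSException;
    import org.ietf.jgss.GSSManager;
    import org.ietf.jgss.Oid;

    public class SpnegoAcceptSketch {
      /** Accepts one SPNEGO token on behalf of the server. */
      public static byte[] accept(byte[] clientToken) throws GSSException {
        GSSManager gssManager = GSSManager.getInstance();
        Oid spnegoOid = new Oid("1.3.6.1.5.5.2");   // SPNEGO mechanism OID
        GSSCredential serverCreds = gssManager.createCredential(
            null,                                   // no fixed server name: any principal in the keytab
            GSSCredential.INDEFINITE_LIFETIME,
            spnegoOid,
            GSSCredential.ACCEPT_ONLY);
        GSSContext context = gssManager.createContext(serverCreds);
        return context.acceptSecContext(clientToken, 0, clientToken.length);
      }
    }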



[25/51] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

Posted by ae...@apache.org.
HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96b12662
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96b12662
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96b12662

Branch: refs/heads/HDFS-7240
Commit: 96b12662ea76e3ded4ef13944fc8df206cfb4613
Parents: 0773ffd
Author: Lei Xu <le...@apache.org>
Authored: Mon Oct 10 15:28:19 2016 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Mon Oct 10 15:30:03 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/common/Storage.java      |  22 ++
 .../server/datanode/BlockPoolSliceStorage.java  |  20 +-
 .../hdfs/server/datanode/BlockScanner.java      |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  34 +-
 .../hdfs/server/datanode/DataStorage.java       |  34 +-
 .../hdfs/server/datanode/DirectoryScanner.java  | 320 +------------------
 .../hdfs/server/datanode/DiskBalancer.java      |  25 +-
 .../hdfs/server/datanode/LocalReplica.java      |   2 +-
 .../hdfs/server/datanode/ReplicaInfo.java       |   2 +-
 .../hdfs/server/datanode/StorageLocation.java   |  32 +-
 .../hdfs/server/datanode/VolumeScanner.java     |  27 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   5 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 234 +++++++++++++-
 .../impl/FsDatasetAsyncDiskService.java         |  40 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 136 ++++----
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 233 ++++++++++++--
 .../fsdataset/impl/FsVolumeImplBuilder.java     |  65 ++++
 .../datanode/fsdataset/impl/FsVolumeList.java   |  44 +--
 .../impl/RamDiskAsyncLazyPersistService.java    |  79 +++--
 .../fsdataset/impl/VolumeFailureInfo.java       |  13 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../TestNameNodePrunesMissingStorages.java      |  15 +-
 .../server/datanode/SimulatedFSDataset.java     |  46 ++-
 .../hdfs/server/datanode/TestBlockScanner.java  |   3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  15 +-
 .../datanode/TestDataNodeVolumeFailure.java     |  12 +-
 .../TestDataNodeVolumeFailureReporting.java     |  10 +
 .../server/datanode/TestDirectoryScanner.java   |  76 +++--
 .../hdfs/server/datanode/TestDiskError.java     |   2 +-
 .../extdataset/ExternalDatasetImpl.java         |  10 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |  44 ++-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |   9 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  69 ++--
 .../fsdataset/impl/TestFsVolumeList.java        |  83 +++--
 .../TestDiskBalancerWithMockMover.java          |   4 +-
 35 files changed, 1062 insertions(+), 713 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 9218e9d..e55de35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.ToolRunner;
@@ -269,11 +270,17 @@ public abstract class Storage extends StorageInfo {
 
     private String storageUuid = null;      // Storage directory identifier.
     
+    private final StorageLocation location;
     public StorageDirectory(File dir) {
       // default dirType is null
       this(dir, null, false);
     }
     
+    public StorageDirectory(StorageLocation location) {
+      // default dirType is null
+      this(location.getFile(), null, false, location);
+    }
+
     public StorageDirectory(File dir, StorageDirType dirType) {
       this(dir, dirType, false);
     }
@@ -294,11 +301,22 @@ public abstract class Storage extends StorageInfo {
      *          disables locking on the storage directory, false enables locking
      */
     public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
+      this(dir, dirType, isShared, null);
+    }
+
+    public StorageDirectory(File dir, StorageDirType dirType,
+        boolean isShared, StorageLocation location) {
       this.root = dir;
       this.lock = null;
       this.dirType = dirType;
       this.isShared = isShared;
+      this.location = location;
+      assert location == null ||
+          dir.getAbsolutePath().startsWith(
+              location.getFile().getAbsolutePath()):
+            "The storage location and directory should be equal";
     }
+
     
     /**
      * Get root directory of this storage
@@ -861,6 +879,10 @@ public abstract class Storage extends StorageInfo {
       }
       return false;
     }
+
+    public StorageLocation getStorageLocation() {
+      return location;
+    }
   }
 
   /**
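
A hedged sketch of the new pairing introduced above (the /data/dn1 path and block-pool id are made up, and StorageLocation.parse(String) is assumed as the usual parser for dfs.datanode.data.dir entries): a StorageDirectory can now carry the StorageLocation it was created from, and the four-argument constructor asserts that the directory really lives under that location.

    import java.io.File;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    public class StorageDirectoryLocationSketch {
      public static void main(String[] args) throws Exception {
        StorageLocation location = StorageLocation.parse("/data/dn1");
        // A block-pool level directory somewhere under the volume root (illustrative path).
        File bpCurrent = new File(location.getFile(),
            "current/BP-1234-127.0.0.1-1444444444444/current");

        Storage.StorageDirectory sd =
            new Storage.StorageDirectory(bpCurrent, null, /* isShared */ true, location);

        // The directory keeps a handle back to the volume's StorageLocation ...
        System.out.println(sd.getStorageLocation() == location);         // true
        // ... and, per the constructor's assert, its root sits under that location.
        System.out.println(sd.getRoot().getAbsolutePath()
            .startsWith(location.getFile().getAbsolutePath()));          // true
      }
    }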

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index fd89611..e3b6da1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -147,10 +147,10 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException
    */
   private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
-      File dataDir, StartupOption startOpt,
+      File dataDir, StorageLocation location, StartupOption startOpt,
       List<Callable<StorageDirectory>> callables, Configuration conf)
           throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, true);
+    StorageDirectory sd = new StorageDirectory(dataDir, null, true, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -208,9 +208,9 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> loadBpStorageDirectories(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables, Configuration conf)
-          throws IOException {
+      Collection<File> dataDirs, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
+      Configuration conf) throws IOException {
     List<StorageDirectory> succeedDirs = Lists.newArrayList();
     try {
       for (File dataDir : dataDirs) {
@@ -220,7 +220,7 @@ public class BlockPoolSliceStorage extends Storage {
                   "attempt to load an used block storage: " + dataDir);
         }
         final StorageDirectory sd = loadStorageDirectory(
-            nsInfo, dataDir, startOpt, callables, conf);
+            nsInfo, dataDir, location, startOpt, callables, conf);
         succeedDirs.add(sd);
       }
     } catch (IOException e) {
@@ -244,12 +244,12 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException on error
    */
   List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
-      Collection<File> dataDirs, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables, Configuration conf)
-          throws IOException {
+      Collection<File> dataDirs, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables,
+      Configuration conf) throws IOException {
     LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
     final List<StorageDirectory> loaded = loadBpStorageDirectories(
-        nsInfo, dataDirs, startOpt, callables, conf);
+        nsInfo, dataDirs, location, startOpt, callables, conf);
     for (StorageDirectory sd : loaded) {
       addStorageDir(sd);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 456dcc1..21484fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -201,17 +201,17 @@ public class BlockScanner {
       FsVolumeSpi volume = ref.getVolume();
       if (!isEnabled()) {
         LOG.debug("Not adding volume scanner for {}, because the block " +
-            "scanner is disabled.", volume.getBasePath());
+            "scanner is disabled.", volume);
         return;
       }
       VolumeScanner scanner = scanners.get(volume.getStorageID());
       if (scanner != null) {
         LOG.error("Already have a scanner for volume {}.",
-            volume.getBasePath());
+            volume);
         return;
       }
       LOG.debug("Adding scanner for volume {} (StorageID {})",
-          volume.getBasePath(), volume.getStorageID());
+          volume, volume.getStorageID());
       scanner = new VolumeScanner(conf, datanode, ref);
       scanner.start();
       scanners.put(volume.getStorageID(), scanner);
@@ -245,7 +245,7 @@ public class BlockScanner {
       return;
     }
     LOG.info("Removing scanner for volume {} (StorageID {})",
-        volume.getBasePath(), volume.getStorageID());
+        volume, volume.getStorageID());
     scanner.shutdown();
     scanners.remove(volume.getStorageID());
     Uninterruptibles.joinUninterruptibly(scanner, 5, TimeUnit.MINUTES);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index dd7e426..cb8e308 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -58,7 +58,6 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -78,7 +77,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -791,11 +789,7 @@ public class DataNode extends ReconfigurableBase
     if (locations.isEmpty()) {
       return;
     }
-    Set<File> volumesToRemove = new HashSet<>();
-    for (StorageLocation loc : locations) {
-      volumesToRemove.add(loc.getFile().getAbsoluteFile());
-    }
-    removeVolumes(volumesToRemove, true);
+    removeVolumes(locations, true);
   }
 
   /**
@@ -814,26 +808,22 @@ public class DataNode extends ReconfigurableBase
    * @throws IOException
    */
   private synchronized void removeVolumes(
-      final Set<File> absoluteVolumePaths, boolean clearFailure)
+      final Collection<StorageLocation> storageLocations, boolean clearFailure)
       throws IOException {
-    for (File vol : absoluteVolumePaths) {
-      Preconditions.checkArgument(vol.isAbsolute());
-    }
-
-    if (absoluteVolumePaths.isEmpty()) {
+    if (storageLocations.isEmpty()) {
       return;
     }
 
     LOG.info(String.format("Deactivating volumes (clear failure=%b): %s",
-        clearFailure, Joiner.on(",").join(absoluteVolumePaths)));
+        clearFailure, Joiner.on(",").join(storageLocations)));
 
     IOException ioe = null;
     // Remove volumes and block infos from FsDataset.
-    data.removeVolumes(absoluteVolumePaths, clearFailure);
+    data.removeVolumes(storageLocations, clearFailure);
 
     // Remove volumes from DataStorage.
     try {
-      storage.removeVolumes(absoluteVolumePaths);
+      storage.removeVolumes(storageLocations);
     } catch (IOException e) {
       ioe = e;
     }
@@ -841,7 +831,7 @@ public class DataNode extends ReconfigurableBase
     // Set configuration and dataDirs to reflect volume changes.
     for (Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext(); ) {
       StorageLocation loc = it.next();
-      if (absoluteVolumePaths.contains(loc.getFile().getAbsoluteFile())) {
+      if (storageLocations.contains(loc)) {
         it.remove();
       }
     }
@@ -3242,18 +3232,18 @@ public class DataNode extends ReconfigurableBase
    * Check the disk error
    */
   private void checkDiskError() {
-    Set<File> unhealthyDataDirs = data.checkDataDir();
-    if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
+    Set<StorageLocation> unhealthyLocations = data.checkDataDir();
+    if (unhealthyLocations != null && !unhealthyLocations.isEmpty()) {
       try {
         // Remove all unhealthy volumes from DataNode.
-        removeVolumes(unhealthyDataDirs, false);
+        removeVolumes(unhealthyLocations, false);
       } catch (IOException e) {
         LOG.warn("Error occurred when removing unhealthy storage dirs: "
             + e.getMessage(), e);
       }
       StringBuilder sb = new StringBuilder("DataNode failed volumes:");
-      for (File dataDir : unhealthyDataDirs) {
-        sb.append(dataDir.getAbsolutePath() + ";");
+      for (StorageLocation location : unhealthyLocations) {
+        sb.append(location + ";");
       }
       handleDiskError(sb.toString());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 7e620c2..7c9bea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -263,9 +263,10 @@ public class DataStorage extends Storage {
   }
 
   private StorageDirectory loadStorageDirectory(DataNode datanode,
-      NamespaceInfo nsInfo, File dataDir, StartupOption startOpt,
-      List<Callable<StorageDirectory>> callables) throws IOException {
-    StorageDirectory sd = new StorageDirectory(dataDir, null, false);
+      NamespaceInfo nsInfo, File dataDir, StorageLocation location,
+      StartupOption startOpt, List<Callable<StorageDirectory>> callables)
+          throws IOException {
+    StorageDirectory sd = new StorageDirectory(dataDir, null, false, location);
     try {
       StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
@@ -310,7 +311,7 @@ public class DataStorage extends Storage {
    * builder later.
    *
    * @param datanode DataNode object.
-   * @param volume the root path of a storage directory.
+   * @param location the StorageLocation for the storage directory.
    * @param nsInfos an array of namespace infos.
    * @return a VolumeBuilder that holds the metadata of this storage directory
    * and can be added to DataStorage later.
@@ -318,8 +319,10 @@ public class DataStorage extends Storage {
    *
    * Note that if there is IOException, the state of DataStorage is not modified.
    */
-  public VolumeBuilder prepareVolume(DataNode datanode, File volume,
-      List<NamespaceInfo> nsInfos) throws IOException {
+  public VolumeBuilder prepareVolume(DataNode datanode,
+      StorageLocation location, List<NamespaceInfo> nsInfos)
+          throws IOException {
+    File volume = location.getFile();
     if (containsStorageDir(volume)) {
       final String errorMessage = "Storage directory is in use";
       LOG.warn(errorMessage + ".");
@@ -327,7 +330,8 @@ public class DataStorage extends Storage {
     }
 
     StorageDirectory sd = loadStorageDirectory(
-        datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP, null);
+        datanode, nsInfos.get(0), volume, location,
+        StartupOption.HOTSWAP, null);
     VolumeBuilder builder =
         new VolumeBuilder(this, sd);
     for (NamespaceInfo nsInfo : nsInfos) {
@@ -338,7 +342,8 @@ public class DataStorage extends Storage {
 
       final BlockPoolSliceStorage bpStorage = getBlockPoolSliceStorage(nsInfo);
       final List<StorageDirectory> dirs = bpStorage.loadBpStorageDirectories(
-          nsInfo, bpDataDirs, StartupOption.HOTSWAP, null, datanode.getConf());
+          nsInfo, bpDataDirs, location, StartupOption.HOTSWAP,
+          null, datanode.getConf());
       builder.addBpStorageDirectories(nsInfo.getBlockPoolID(), dirs);
     }
     return builder;
@@ -407,7 +412,7 @@ public class DataStorage extends Storage {
           final List<Callable<StorageDirectory>> callables
               = Lists.newArrayList();
           final StorageDirectory sd = loadStorageDirectory(
-              datanode, nsInfo, root, startOpt, callables);
+              datanode, nsInfo, root, dataDir, startOpt, callables);
           if (callables.isEmpty()) {
             addStorageDir(sd);
             success.add(dataDir);
@@ -458,7 +463,8 @@ public class DataStorage extends Storage {
 
         final List<Callable<StorageDirectory>> callables = Lists.newArrayList();
         final List<StorageDirectory> dirs = bpStorage.recoverTransitionRead(
-            nsInfo, bpDataDirs, startOpt, callables, datanode.getConf());
+            nsInfo, bpDataDirs, dataDir, startOpt,
+            callables, datanode.getConf());
         if (callables.isEmpty()) {
           for(StorageDirectory sd : dirs) {
             success.add(sd);
@@ -498,9 +504,10 @@ public class DataStorage extends Storage {
    * @param dirsToRemove a set of storage directories to be removed.
    * @throws IOException if I/O error when unlocking storage directory.
    */
-  synchronized void removeVolumes(final Set<File> dirsToRemove)
+  synchronized void removeVolumes(
+      final Collection<StorageLocation> storageLocations)
       throws IOException {
-    if (dirsToRemove.isEmpty()) {
+    if (storageLocations.isEmpty()) {
       return;
     }
 
@@ -508,7 +515,8 @@ public class DataStorage extends Storage {
     for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
-      if (dirsToRemove.contains(sd.getRoot())) {
+      StorageLocation sdLocation = sd.getStorageLocation();
+      if (storageLocations.contains(sdLocation)) {
         // Remove the block pool level storage first.
         for (Map.Entry<String, BlockPoolSliceStorage> entry :
             this.bpStorageMap.entrySet()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index c50bfaf..58071dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -37,9 +36,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,10 +43,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
@@ -209,200 +204,6 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  /**
-   * Tracks the files and other information related to a block on the disk
-   * Missing file is indicated by setting the corresponding member
-   * to null.
-   * 
-   * Because millions of these structures may be created, we try to save
-   * memory here.  So instead of storing full paths, we store path suffixes.
-   * The block file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>
-   * So we don't need to store the volume path, since we already know what the
-   * volume is.
-   * 
-   * The metadata file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>_<genstamp>.meta
-   * So if we have a block file, there isn't any need to store the block path
-   * again.
-   * 
-   * The accessor functions take care of these manipulations.
-   */
-  static class ScanInfo implements Comparable<ScanInfo> {
-    private final long blockId;
-    
-    /**
-     * The block file path, relative to the volume's base directory.
-     * If there was no block file found, this may be null. If 'vol'
-     * is null, then this is the full path of the block file.
-     */
-    private final String blockSuffix;
-    
-    /**
-     * The suffix of the meta file path relative to the block file.
-     * If blockSuffix is null, then this will be the entire path relative
-     * to the volume base directory, or an absolute path if vol is also
-     * null.
-     */
-    private final String metaSuffix;
-
-    private final FsVolumeSpi volume;
-
-    /**
-     * Get the file's length in async block scan
-     */
-    private final long blockFileLength;
-
-    private final static Pattern CONDENSED_PATH_REGEX =
-        Pattern.compile("(?<!^)(\\\\|/){2,}");
-    
-    private final static String QUOTED_FILE_SEPARATOR = 
-        Matcher.quoteReplacement(File.separator);
-    
-    /**
-     * Get the most condensed version of the path.
-     *
-     * For example, the condensed version of /foo//bar is /foo/bar
-     * Unlike {@link File#getCanonicalPath()}, this will never perform I/O
-     * on the filesystem.
-     *
-     * @param path the path to condense
-     * @return the condensed path
-     */
-    private static String getCondensedPath(String path) {
-      return CONDENSED_PATH_REGEX.matcher(path).
-          replaceAll(QUOTED_FILE_SEPARATOR);
-    }
-
-    /**
-     * Get a path suffix.
-     *
-     * @param f            The file to get the suffix for.
-     * @param prefix       The prefix we're stripping off.
-     *
-     * @return             A suffix such that prefix + suffix = path to f
-     */
-    private static String getSuffix(File f, String prefix) {
-      String fullPath = getCondensedPath(f.getAbsolutePath());
-      if (fullPath.startsWith(prefix)) {
-        return fullPath.substring(prefix.length());
-      }
-      throw new RuntimeException(prefix + " is not a prefix of " + fullPath);
-    }
-
-    /**
-     * Create a ScanInfo object for a block. This constructor will examine
-     * the block data and meta-data files.
-     *
-     * @param blockId the block ID
-     * @param blockFile the path to the block data file
-     * @param metaFile the path to the block meta-data file
-     * @param vol the volume that contains the block
-     */
-    ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
-      this.blockId = blockId;
-      String condensedVolPath = vol == null ? null :
-        getCondensedPath(vol.getBasePath());
-      this.blockSuffix = blockFile == null ? null :
-        getSuffix(blockFile, condensedVolPath);
-      this.blockFileLength = (blockFile != null) ? blockFile.length() : 0; 
-      if (metaFile == null) {
-        this.metaSuffix = null;
-      } else if (blockFile == null) {
-        this.metaSuffix = getSuffix(metaFile, condensedVolPath);
-      } else {
-        this.metaSuffix = getSuffix(metaFile,
-            condensedVolPath + blockSuffix);
-      }
-      this.volume = vol;
-    }
-
-    /**
-     * Returns the block data file.
-     *
-     * @return the block data file
-     */
-    File getBlockFile() {
-      return (blockSuffix == null) ? null :
-        new File(volume.getBasePath(), blockSuffix);
-    }
-
-    /**
-     * Return the length of the data block. The length returned is the length
-     * cached when this object was created.
-     *
-     * @return the length of the data block
-     */
-    long getBlockFileLength() {
-      return blockFileLength;
-    }
-
-    /**
-     * Returns the block meta data file or null if there isn't one.
-     *
-     * @return the block meta data file
-     */
-    File getMetaFile() {
-      if (metaSuffix == null) {
-        return null;
-      } else if (blockSuffix == null) {
-        return new File(volume.getBasePath(), metaSuffix);
-      } else {
-        return new File(volume.getBasePath(), blockSuffix + metaSuffix);
-      }
-    }
-
-    /**
-     * Returns the block ID.
-     *
-     * @return the block ID
-     */
-    long getBlockId() {
-      return blockId;
-    }
-
-    /**
-     * Returns the volume that contains the block that this object describes.
-     *
-     * @return the volume
-     */
-    FsVolumeSpi getVolume() {
-      return volume;
-    }
-
-    @Override // Comparable
-    public int compareTo(ScanInfo b) {
-      if (blockId < b.blockId) {
-        return -1;
-      } else if (blockId == b.blockId) {
-        return 0;
-      } else {
-        return 1;
-      }
-    }
-
-    @Override // Object
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (!(o instanceof ScanInfo)) {
-        return false;
-      }
-      return blockId == ((ScanInfo) o).blockId;
-    }
-
-    @Override // Object
-    public int hashCode() {
-      return (int)(blockId^(blockId>>>32));
-    }
-
-    public long getGenStamp() {
-      return metaSuffix != null ? Block.getGenerationStamp(
-          getMetaFile().getName()) : 
-            HdfsConstants.GRANDFATHER_GENERATION_STAMP;
-    }
-  }
 
   /**
    * Create a new directory scanner, but don't cycle it running yet.
@@ -644,7 +445,7 @@ public class DirectoryScanner implements Runnable {
             // There may be multiple on-disk records for the same block, don't increment
             // the memory record pointer if so.
             ScanInfo nextInfo = blockpoolReport[Math.min(d, blockpoolReport.length - 1)];
-            if (nextInfo.getBlockId() != info.blockId) {
+            if (nextInfo.getBlockId() != info.getBlockId()) {
               ++m;
             }
           } else {
@@ -763,19 +564,6 @@ public class DirectoryScanner implements Runnable {
   }
 
   /**
-   * Helper method to determine if a file name is consistent with a block.
-   * meta-data file
-   *
-   * @param blockId the block ID
-   * @param metaFile the file to check
-   * @return whether the file name is a block meta-data file name
-   */
-  private static boolean isBlockMetaFile(String blockId, String metaFile) {
-    return metaFile.startsWith(blockId)
-        && metaFile.endsWith(Block.METADATA_EXTENSION);
-  }
-
-  /**
    * The ReportCompiler class encapsulates the process of searching a datanode's
    * disks for block information.  It operates by performing a DFS of the
    * volume to discover block information.
@@ -784,7 +572,7 @@ public class DirectoryScanner implements Runnable {
    * ScanInfo object for it and adds that object to its report list.  The report
    * list is returned by the {@link #call()} method.
    */
-  private class ReportCompiler implements Callable<ScanInfoPerBlockPool> {
+  public class ReportCompiler implements Callable<ScanInfoPerBlockPool> {
     private final FsVolumeSpi volume;
     private final DataNode datanode;
     // Variable for tracking time spent running for throttling purposes
@@ -816,14 +604,12 @@ public class DirectoryScanner implements Runnable {
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
-        File bpFinalizedDir = volume.getFinalizedDir(bpid);
 
         perfTimer.start();
         throttleTimer.start();
 
         try {
-          result.put(bpid,
-              compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));
+          result.put(bpid, volume.compileReport(bpid, report, this));
         } catch (InterruptedException ex) {
           // Exit quickly and flag the scanner to do the same
           result = null;
@@ -834,106 +620,12 @@ public class DirectoryScanner implements Runnable {
     }
 
     /**
-     * Compile a list of {@link ScanInfo} for the blocks in the directory
-     * given by {@code dir}.
-     *
-     * @param vol the volume that contains the directory to scan
-     * @param bpFinalizedDir the root directory of the directory to scan
-     * @param dir the directory to scan
-     * @param report the list onto which blocks reports are placed
-     */
-    private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol,
-        File bpFinalizedDir, File dir, LinkedList<ScanInfo> report)
-        throws InterruptedException {
-
-      throttle();
-
-      List <String> fileNames;
-      try {
-        fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
-      } catch (IOException ioe) {
-        LOG.warn("Exception occured while compiling report: ", ioe);
-        // Initiate a check on disk failure.
-        datanode.checkDiskErrorAsync();
-        // Ignore this directory and proceed.
-        return report;
-      }
-      Collections.sort(fileNames);
-
-      /*
-       * Assumption: In the sorted list of files block file appears immediately
-       * before block metadata file. This is true for the current naming
-       * convention for block file blk_<blockid> and meta file
-       * blk_<blockid>_<genstamp>.meta
-       */
-      for (int i = 0; i < fileNames.size(); i++) {
-        // Make sure this thread can make a timely exit. With a low throttle
-        // rate, completing a run can take a looooong time.
-        if (Thread.interrupted()) {
-          throw new InterruptedException();
-        }
-
-        File file = new File(dir, fileNames.get(i));
-        if (file.isDirectory()) {
-          compileReport(vol, bpFinalizedDir, file, report);
-          continue;
-        }
-        if (!Block.isBlockFilename(file)) {
-          if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
-            long blockId = Block.getBlockId(file.getName());
-            verifyFileLocation(file.getParentFile(), bpFinalizedDir,
-                blockId);
-            report.add(new ScanInfo(blockId, null, file, vol));
-          }
-          continue;
-        }
-        File blockFile = file;
-        long blockId = Block.filename2id(file.getName());
-        File metaFile = null;
-
-        // Skip all the files that start with block name until
-        // getting to the metafile for the block
-        while (i + 1 < fileNames.size()) {
-          File blkMetaFile = new File(dir, fileNames.get(i + 1));
-          if (!(blkMetaFile.isFile()
-              && blkMetaFile.getName().startsWith(blockFile.getName()))) {
-            break;
-          }
-          i++;
-          if (isBlockMetaFile(blockFile.getName(), blkMetaFile.getName())) {
-            metaFile = blkMetaFile;
-            break;
-          }
-        }
-        verifyFileLocation(blockFile, bpFinalizedDir, blockId);
-        report.add(new ScanInfo(blockId, blockFile, metaFile, vol));
-      }
-      return report;
-    }
-
-    /**
-     * Verify whether the actual directory location of block file has the
-     * expected directory path computed using its block ID.
-     */
-    private void verifyFileLocation(File actualBlockFile,
-        File bpFinalizedDir, long blockId) {
-      File expectedBlockDir =
-          DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-      File actualBlockDir = actualBlockFile.getParentFile();
-      if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
-        LOG.warn("Block: " + blockId +
-            " found in invalid directory.  Expected directory: " +
-            expectedBlockDir + ".  Actual directory: " + actualBlockDir);
-      }
-    }
-
-    /**
      * Called by the thread before each potential disk scan so that a pause
      * can be optionally inserted to limit the number of scans per second.
      * The limit is controlled by
      * {@link DFSConfigKeys#DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY}.
      */
-    private void throttle() throws InterruptedException {
+    public void throttle() throws InterruptedException {
       accumulateTimeRunning();
 
       if ((throttleLimitMsPerSec < 1000) &&
@@ -963,7 +655,7 @@ public class DirectoryScanner implements Runnable {
     }
   }
 
-  private enum BlockDirFilter implements FilenameFilter {
+  public enum BlockDirFilter implements FilenameFilter {
     INSTANCE;
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index e7e9105..0c75001 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -500,7 +500,8 @@ public class DiskBalancer {
         references = this.dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          storageIDToVolBasePathMap.put(vol.getStorageID(), vol.getBasePath());
+          storageIDToVolBasePathMap.put(vol.getStorageID(),
+              vol.getBaseURI().getPath());
         }
         references.close();
       }
@@ -1023,7 +1024,7 @@ public class DiskBalancer {
         openPoolIters(source, poolIters);
         if (poolIters.size() == 0) {
           LOG.error("No block pools found on volume. volume : {}. Exiting.",
-              source.getBasePath());
+              source.getBaseURI());
           return;
         }
 
@@ -1033,17 +1034,16 @@ public class DiskBalancer {
             // Check for the max error count constraint.
             if (item.getErrorCount() > getMaxError(item)) {
               LOG.error("Exceeded the max error count. source {}, dest: {} " +
-                      "error count: {}", source.getBasePath(),
-                  dest.getBasePath(), item.getErrorCount());
-              this.setExitFlag();
-              continue;
+                      "error count: {}", source.getBaseURI(),
+                  dest.getBaseURI(), item.getErrorCount());
+              break;
             }
 
             // Check for the block tolerance constraint.
             if (isCloseEnough(item)) {
               LOG.info("Copy from {} to {} done. copied {} bytes and {} " +
                       "blocks.",
-                  source.getBasePath(), dest.getBasePath(),
+                  source.getBaseURI(), dest.getBaseURI(),
                   item.getBytesCopied(), item.getBlocksCopied());
               this.setExitFlag();
               continue;
@@ -1053,7 +1053,7 @@ public class DiskBalancer {
             // we are not able to find any blocks to copy.
             if (block == null) {
               LOG.error("No source blocks, exiting the copy. Source: {}, " +
-                  "Dest:{}", source.getBasePath(), dest.getBasePath());
+                  "Dest:{}", source.getBaseURI(), dest.getBaseURI());
               this.setExitFlag();
               continue;
             }
@@ -1081,14 +1081,13 @@ public class DiskBalancer {
               // exiting here.
               LOG.error("Destination volume: {} does not have enough space to" +
                   " accommodate a block. Block Size: {} Exiting from" +
-                  " copyBlocks.", dest.getBasePath(), block.getNumBytes());
-              this.setExitFlag();
-              continue;
+                  " copyBlocks.", dest.getBaseURI(), block.getNumBytes());
+              break;
             }
 
             LOG.debug("Moved block with size {} from  {} to {}",
-                block.getNumBytes(), source.getBasePath(),
-                dest.getBasePath());
+                block.getNumBytes(), source.getBaseURI(),
+                dest.getBaseURI());
 
             // Check for the max throughput constraint.
             // We sleep here to keep the promise that we will not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index cbfc9a5..58febf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index cbbafc3..dc63238 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -25,8 +25,8 @@ import java.net.URI;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.util.LightWeightResizableGSet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 3162c5c..75abc1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.StringUtils;
 
+
 /**
  * Encapsulates the URI and storage medium that together describe a
  * storage directory.
@@ -37,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  */
 @InterfaceAudience.Private
-public class StorageLocation {
+public class StorageLocation implements Comparable<StorageLocation>{
   final StorageType storageType;
   final File file;
 
@@ -104,16 +105,37 @@ public class StorageLocation {
 
   @Override
   public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    } else if (obj == null || !(obj instanceof StorageLocation)) {
+    if (obj == null || !(obj instanceof StorageLocation)) {
       return false;
     }
-    return toString().equals(obj.toString());
+    int comp = compareTo((StorageLocation) obj);
+    return comp == 0;
   }
 
   @Override
   public int hashCode() {
     return toString().hashCode();
   }
+
+  @Override
+  public int compareTo(StorageLocation obj) {
+    if (obj == this) {
+      return 0;
+    } else if (obj == null) {
+      return -1;
+    }
+
+    StorageLocation otherStorage = (StorageLocation) obj;
+    if (this.getFile() != null && otherStorage.getFile() != null) {
+      return this.getFile().getAbsolutePath().compareTo(
+          otherStorage.getFile().getAbsolutePath());
+    } else if (this.getFile() == null && otherStorage.getFile() == null) {
+      return this.storageType.compareTo(otherStorage.getStorageType());
+    } else if (this.getFile() == null) {
+      return -1;
+    } else {
+      return 1;
+    }
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 3416b53..1e44fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -217,7 +217,7 @@ public class VolumeScanner extends Thread {
 
   public void printStats(StringBuilder p) {
     p.append(String.format("Block scanner information for volume %s with base" +
-        " path %s%n", volume.getStorageID(), volume.getBasePath()));
+        " path %s%n", volume.getStorageID(), volume));
     synchronized (stats) {
       p.append(String.format("Bytes verified in last hour       : %57d%n",
           stats.bytesScannedInPastHour));
@@ -253,20 +253,20 @@ public class VolumeScanner extends Thread {
 
     public void setup(VolumeScanner scanner) {
       LOG.trace("Starting VolumeScanner {}",
-          scanner.volume.getBasePath());
+          scanner.volume);
       this.scanner = scanner;
     }
 
     public void handle(ExtendedBlock block, IOException e) {
       FsVolumeSpi volume = scanner.volume;
       if (e == null) {
-        LOG.trace("Successfully scanned {} on {}", block, volume.getBasePath());
+        LOG.trace("Successfully scanned {} on {}", block, volume);
         return;
       }
       // If the block does not exist anymore, then it's not an error.
       if (!volume.getDataset().contains(block)) {
         LOG.debug("Volume {}: block {} is no longer in the dataset.",
-            volume.getBasePath(), block);
+            volume, block);
         return;
       }
       // If the block exists, the exception may due to a race with write:
@@ -278,11 +278,10 @@ public class VolumeScanner extends Thread {
       if (e instanceof FileNotFoundException ) {
         LOG.info("Volume {}: verification failed for {} because of " +
                 "FileNotFoundException.  This may be due to a race with write.",
-            volume.getBasePath(), block);
+            volume, block);
         return;
       }
-      LOG.warn("Reporting bad " + block + " with volume "
-          + volume.getBasePath(), e);
+      LOG.warn("Reporting bad {} on {}", block, volume);
       try {
         scanner.datanode.reportBadBlocks(block, volume);
       } catch (IOException ie) {
@@ -305,7 +304,7 @@ public class VolumeScanner extends Thread {
       handler = new ScanResultHandler();
     }
     this.resultHandler = handler;
-    setName("VolumeScannerThread(" + volume.getBasePath() + ")");
+    setName("VolumeScannerThread(" + volume + ")");
     setDaemon(true);
   }
 
@@ -376,7 +375,7 @@ public class VolumeScanner extends Thread {
       BlockIterator iter = blockIters.get(idx);
       if (!iter.atEnd()) {
         LOG.info("Now scanning bpid {} on volume {}",
-            iter.getBlockPoolId(), volume.getBasePath());
+            iter.getBlockPoolId(), volume);
         curBlockIter = iter;
         return 0L;
       }
@@ -385,7 +384,7 @@ public class VolumeScanner extends Thread {
       if (waitMs <= 0) {
         iter.rewind();
         LOG.info("Now rescanning bpid {} on volume {}, after more than " +
-            "{} hour(s)", iter.getBlockPoolId(), volume.getBasePath(),
+            "{} hour(s)", iter.getBlockPoolId(), volume,
             TimeUnit.HOURS.convert(conf.scanPeriodMs, TimeUnit.MILLISECONDS));
         curBlockIter = iter;
         return 0L;
@@ -416,16 +415,16 @@ public class VolumeScanner extends Thread {
           cblock.getBlockPoolId(), cblock.getBlockId());
       if (b == null) {
         LOG.info("Replica {} was not found in the VolumeMap for volume {}",
-            cblock, volume.getBasePath());
+            cblock, volume);
       } else {
         block = new ExtendedBlock(cblock.getBlockPoolId(), b);
       }
     } catch (FileNotFoundException e) {
       LOG.info("FileNotFoundException while finding block {} on volume {}",
-          cblock, volume.getBasePath());
+          cblock, volume);
     } catch (IOException e) {
       LOG.warn("I/O error while finding block {} on volume {}",
-            cblock, volume.getBasePath());
+            cblock, volume);
     }
     if (block == null) {
       return -1; // block not found.
@@ -642,7 +641,7 @@ public class VolumeScanner extends Thread {
 
   @Override
   public String toString() {
-    return "VolumeScanner(" + volume.getBasePath() +
+    return "VolumeScanner(" + volume +
         ", " + volume.getStorageID() + ")";
   }
 

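The hunks above switch the scanner's log statements from volume.getBasePath() to the volume object itself, relying on the volume's toString() and on parameterized "{}" logging. As a point of reference, here is a minimal, self-contained sketch of that pattern; DemoVolume and its fields are invented for the example (they are not HDFS classes), and slf4j-api is assumed to be on the classpath.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch: give the volume a meaningful toString() and pass the object itself
// to parameterized log calls, so the message is only rendered when the log
// level is enabled and call sites no longer need getBasePath().
public class VolumeLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(VolumeLoggingSketch.class);

  static class DemoVolume {
    private final String storageId;
    private final String baseUri;

    DemoVolume(String storageId, String baseUri) {
      this.storageId = storageId;
      this.baseUri = baseUri;
    }

    String getStorageID() {
      return storageId;
    }

    @Override
    public String toString() {
      return baseUri;   // what callers previously obtained via getBasePath()
    }
  }

  public static void main(String[] args) {
    DemoVolume volume = new DemoVolume("DS-1b3f", "file:/data/dn1");
    LOG.info("Now scanning volume {}", volume);
    LOG.info("VolumeScanner({}, {})", volume, volume.getStorageID());
  }
}
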
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index b75ed5b..f2ffa83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -206,7 +207,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * @param clearFailure set true to clear the failure information about the
    *                     volumes.
    */
-  void removeVolumes(Set<File> volumes, boolean clearFailure);
+  void removeVolumes(Collection<StorageLocation> volumes, boolean clearFailure);
 
   /** @return a storage with the given storage ID */
   DatanodeStorage getStorage(final String storageUuid);
@@ -482,7 +483,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
      * Check if all the data directories are healthy
      * @return A set of unhealthy data directories.
      */
-  Set<File> checkDataDir();
+  Set<StorageLocation> checkDataDir();
 
   /**
    * Shutdown the FSDataset

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 9e16121..dbba31d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -20,10 +20,20 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset;
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.LinkedList;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 
 /**
  * This is an interface for the underlying volume.
@@ -48,14 +58,14 @@ public interface FsVolumeSpi {
   long getAvailable() throws IOException;
 
   /** @return the base path to the volume */
-  String getBasePath();
+  URI getBaseURI();
 
-  /** @return the path to the volume */
-  String getPath(String bpid) throws IOException;
+  DF getUsageStats(Configuration conf);
 
-  /** @return the directory for the finalized blocks in the block pool. */
-  File getFinalizedDir(String bpid) throws IOException;
-  
+  /** @return the {@link StorageLocation} to the volume */
+  StorageLocation getStorageLocation();
+
+  /** @return the {@link StorageType} of the volume */
   StorageType getStorageType();
 
   /** Returns true if the volume is NOT backed by persistent storage. */
@@ -186,4 +196,216 @@ public interface FsVolumeSpi {
    * Get the FSDatasetSpi which this volume is a part of.
    */
   FsDatasetSpi getDataset();
+
+  /**
+   * Tracks the files and other information related to a block on the disk.
+   * A missing file is indicated by setting the corresponding member
+   * to null.
+   *
+   * Because millions of these structures may be created, we try to save
+   * memory here.  So instead of storing full paths, we store path suffixes.
+   * The block file, if it exists, will have a path like this:
+   * <volume_base_path>/<block_path>
+   * So we don't need to store the volume path, since we already know what the
+   * volume is.
+   *
+   * The metadata file, if it exists, will have a path like this:
+   * <volume_base_path>/<block_path>_<genstamp>.meta
+   * So if we have a block file, there isn't any need to store the block path
+   * again.
+   *
+   * The accessor functions take care of these manipulations.
+   */
+  public static class ScanInfo implements Comparable<ScanInfo> {
+    private final long blockId;
+
+    /**
+     * The block file path, relative to the volume's base directory.
+     * If there was no block file found, this may be null. If 'vol'
+     * is null, then this is the full path of the block file.
+     */
+    private final String blockSuffix;
+
+    /**
+     * The suffix of the meta file path relative to the block file.
+     * If blockSuffix is null, then this will be the entire path relative
+     * to the volume base directory, or an absolute path if vol is also
+     * null.
+     */
+    private final String metaSuffix;
+
+    private final FsVolumeSpi volume;
+
+    /**
+     * Get the file's length in async block scan
+     */
+    private final long blockFileLength;
+
+    private final static Pattern CONDENSED_PATH_REGEX =
+        Pattern.compile("(?<!^)(\\\\|/){2,}");
+
+    private final static String QUOTED_FILE_SEPARATOR =
+        Matcher.quoteReplacement(File.separator);
+
+    /**
+     * Get the most condensed version of the path.
+     *
+     * For example, the condensed version of /foo//bar is /foo/bar
+     * Unlike {@link File#getCanonicalPath()}, this will never perform I/O
+     * on the filesystem.
+     *
+     * @param path the path to condense
+     * @return the condensed path
+     */
+    private static String getCondensedPath(String path) {
+      return CONDENSED_PATH_REGEX.matcher(path).
+          replaceAll(QUOTED_FILE_SEPARATOR);
+    }
+
+    /**
+     * Get a path suffix.
+     *
+     * @param f            The file to get the suffix for.
+     * @param prefix       The prefix we're stripping off.
+     *
+     * @return             A suffix such that prefix + suffix = path to f
+     */
+    private static String getSuffix(File f, String prefix) {
+      String fullPath = getCondensedPath(f.getAbsolutePath());
+      if (fullPath.startsWith(prefix)) {
+        return fullPath.substring(prefix.length());
+      }
+      throw new RuntimeException(prefix + " is not a prefix of " + fullPath);
+    }
+
+    /**
+     * Create a ScanInfo object for a block. This constructor will examine
+     * the block data and meta-data files.
+     *
+     * @param blockId the block ID
+     * @param blockFile the path to the block data file
+     * @param metaFile the path to the block meta-data file
+     * @param vol the volume that contains the block
+     */
+    public ScanInfo(long blockId, File blockFile, File metaFile,
+        FsVolumeSpi vol) {
+      this.blockId = blockId;
+      String condensedVolPath =
+          (vol == null || vol.getBaseURI() == null) ? null :
+            getCondensedPath(new File(vol.getBaseURI()).getAbsolutePath());
+      this.blockSuffix = blockFile == null ? null :
+        getSuffix(blockFile, condensedVolPath);
+      this.blockFileLength = (blockFile != null) ? blockFile.length() : 0;
+      if (metaFile == null) {
+        this.metaSuffix = null;
+      } else if (blockFile == null) {
+        this.metaSuffix = getSuffix(metaFile, condensedVolPath);
+      } else {
+        this.metaSuffix = getSuffix(metaFile,
+            condensedVolPath + blockSuffix);
+      }
+      this.volume = vol;
+    }
+
+    /**
+     * Returns the block data file.
+     *
+     * @return the block data file
+     */
+    public File getBlockFile() {
+      return (blockSuffix == null) ? null :
+        new File(new File(volume.getBaseURI()).getAbsolutePath(), blockSuffix);
+    }
+
+    /**
+     * Return the length of the data block. The length returned is the length
+     * cached when this object was created.
+     *
+     * @return the length of the data block
+     */
+    public long getBlockFileLength() {
+      return blockFileLength;
+    }
+
+    /**
+     * Returns the block meta data file or null if there isn't one.
+     *
+     * @return the block meta data file
+     */
+    public File getMetaFile() {
+      if (metaSuffix == null) {
+        return null;
+      } else if (blockSuffix == null) {
+        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+            metaSuffix);
+      } else {
+        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+            blockSuffix + metaSuffix);
+      }
+    }
+
+    /**
+     * Returns the block ID.
+     *
+     * @return the block ID
+     */
+    public long getBlockId() {
+      return blockId;
+    }
+
+    /**
+     * Returns the volume that contains the block that this object describes.
+     *
+     * @return the volume
+     */
+    public FsVolumeSpi getVolume() {
+      return volume;
+    }
+
+    @Override // Comparable
+    public int compareTo(ScanInfo b) {
+      if (blockId < b.blockId) {
+        return -1;
+      } else if (blockId == b.blockId) {
+        return 0;
+      } else {
+        return 1;
+      }
+    }
+
+    @Override // Object
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof ScanInfo)) {
+        return false;
+      }
+      return blockId == ((ScanInfo) o).blockId;
+    }
+
+    @Override // Object
+    public int hashCode() {
+      return (int)(blockId^(blockId>>>32));
+    }
+
+    public long getGenStamp() {
+      return metaSuffix != null ? Block.getGenerationStamp(
+          getMetaFile().getName()) :
+            HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+    }
+  }
+
+  /**
+   * Compile a list of {@link ScanInfo} for the blocks in
+   * the block pool with id {@code bpid}.
+   *
+   * @param bpid block pool id to scan
+   * @param report the list onto which blocks reports are placed
+   * @param reportCompiler
+   * @throws IOException
+   */
+  LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException;
 }

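ScanInfo above keeps memory usage down by storing only the block path suffix relative to the volume's base directory and rebuilding full paths on demand. The following is a minimal sketch of that suffix scheme, assuming a Unix-style filesystem; the class, demo paths, and method names are invented for illustration and are not part of the HDFS API.

import java.io.File;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of the suffix-storage idea used by ScanInfo: collapse duplicate
// separators with pure string manipulation, strip a known base prefix, and
// rebuild the full path only when it is actually needed.
public class SuffixPathSketch {
  private static final Pattern CONDENSED_PATH_REGEX =
      Pattern.compile("(?<!^)(\\\\|/){2,}");
  private static final String QUOTED_FILE_SEPARATOR =
      Matcher.quoteReplacement(File.separator);

  // Collapse runs of separators without any filesystem I/O.
  static String condense(String path) {
    return CONDENSED_PATH_REGEX.matcher(path).replaceAll(QUOTED_FILE_SEPARATOR);
  }

  // Return the suffix such that base + suffix equals the full path of f.
  static String suffix(File f, String base) {
    String full = condense(f.getAbsolutePath());
    if (!full.startsWith(base)) {
      throw new IllegalArgumentException(base + " is not a prefix of " + full);
    }
    return full.substring(base.length());
  }

  public static void main(String[] args) {
    System.out.println(condense("/data//dn1///current"));   // /data/dn1/current

    String base = condense(new File("/data/dn1/current").getAbsolutePath());
    File blockFile = new File(base, "BP-1/finalized/blk_1001");
    String blockSuffix = suffix(blockFile, base);

    // Only the short suffix has to stay in memory per block; the full path
    // is reassembled from the volume's base directory on demand.
    System.out.println(blockSuffix);
    System.out.println(new File(base, blockSuffix));
  }
}
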
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index c9160cd..b9c731b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -71,8 +71,8 @@ class FsDatasetAsyncDiskService {
   private final DataNode datanode;
   private final FsDatasetImpl fsdatasetImpl;
   private final ThreadGroup threadGroup;
-  private Map<File, ThreadPoolExecutor> executors
-      = new HashMap<File, ThreadPoolExecutor>();
+  private Map<String, ThreadPoolExecutor> executors
+      = new HashMap<String, ThreadPoolExecutor>();
   private Map<String, Set<Long>> deletedBlockIds 
       = new HashMap<String, Set<Long>>();
   private static final int MAX_DELETED_BLOCKS = 64;
@@ -91,7 +91,7 @@ class FsDatasetAsyncDiskService {
     this.threadGroup = new ThreadGroup(getClass().getSimpleName());
   }
 
-  private void addExecutorForVolume(final File volume) {
+  private void addExecutorForVolume(final FsVolumeImpl volume) {
     ThreadFactory threadFactory = new ThreadFactory() {
       int counter = 0;
 
@@ -115,18 +115,21 @@ class FsDatasetAsyncDiskService {
 
     // This can reduce the number of running threads
     executor.allowCoreThreadTimeOut(true);
-    executors.put(volume, executor);
+    executors.put(volume.getStorageID(), executor);
   }
 
   /**
    * Starts AsyncDiskService for a new volume
    * @param volume the root of the new data volume.
    */
-  synchronized void addVolume(File volume) {
+  synchronized void addVolume(FsVolumeImpl volume) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    if (volume == null) {
+      throw new RuntimeException("Attempt to add a null volume");
+    }
+    ThreadPoolExecutor executor = executors.get(volume.getStorageID());
     if (executor != null) {
       throw new RuntimeException("Volume " + volume + " is already existed.");
     }
@@ -137,17 +140,17 @@ class FsDatasetAsyncDiskService {
    * Stops AsyncDiskService for a volume.
    * @param volume the root of the volume.
    */
-  synchronized void removeVolume(File volume) {
+  synchronized void removeVolume(String storageId) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Can not find volume " + volume
-          + " to remove.");
+      throw new RuntimeException("Can not find volume with storageId "
+          + storageId + " to remove.");
     } else {
       executor.shutdown();
-      executors.remove(volume);
+      executors.remove(storageId);
     }
   }
   
@@ -162,13 +165,16 @@ class FsDatasetAsyncDiskService {
   /**
    * Execute the task sometime in the future, using ThreadPools.
    */
-  synchronized void execute(File root, Runnable task) {
+  synchronized void execute(FsVolumeImpl volume, Runnable task) {
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(root);
+    if (volume == null) {
+      throw new RuntimeException("A null volume does not have an executor");
+    }
+    ThreadPoolExecutor executor = executors.get(volume.getStorageID());
     if (executor == null) {
-      throw new RuntimeException("Cannot find root " + root
+      throw new RuntimeException("Cannot find volume " + volume
           + " for execution of task " + task);
     } else {
       executor.execute(task);
@@ -185,7 +191,7 @@ class FsDatasetAsyncDiskService {
     } else {
       LOG.info("Shutting down all async disk service threads");
       
-      for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
+      for (Map.Entry<String, ThreadPoolExecutor> e : executors.entrySet()) {
         e.getValue().shutdown();
       }
       // clear the executor map so that calling execute again will fail.
@@ -198,7 +204,7 @@ class FsDatasetAsyncDiskService {
   public void submitSyncFileRangeRequest(FsVolumeImpl volume,
       final FileDescriptor fd, final long offset, final long nbytes,
       final int flags) {
-    execute(volume.getCurrentDir(), new Runnable() {
+    execute(volume, new Runnable() {
       @Override
       public void run() {
         try {
@@ -220,7 +226,7 @@ class FsDatasetAsyncDiskService {
         + " replica " + replicaToDelete + " for deletion");
     ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
         volumeRef, replicaToDelete, block, trashDirectory);
-    execute(((FsVolumeImpl) volumeRef.getVolume()).getCurrentDir(), deletionTask);
+    execute(((FsVolumeImpl) volumeRef.getVolume()), deletionTask);
   }
   
   /** A task for deleting a block file and its associated meta file, as well

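The change above keys the per-volume thread pools by the volume's storage ID instead of by a File root. A compact sketch of that bookkeeping, independent of HDFS, is below; VolumeLike and the single-thread executors are stand-ins chosen for brevity, not the real Hadoop types.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch: one executor per volume, looked up by storage ID, so the mapping
// survives any change in how the volume's directories are represented.
public class PerVolumeExecutors {
  interface VolumeLike { String getStorageID(); }

  private final Map<String, ExecutorService> executors = new HashMap<>();

  synchronized void addVolume(VolumeLike volume) {
    if (volume == null) {
      throw new IllegalArgumentException("volume must not be null");
    }
    if (executors.containsKey(volume.getStorageID())) {
      throw new IllegalStateException(
          "Volume " + volume.getStorageID() + " already added");
    }
    executors.put(volume.getStorageID(), Executors.newSingleThreadExecutor());
  }

  synchronized void removeVolume(String storageId) {
    ExecutorService executor = executors.remove(storageId);
    if (executor == null) {
      throw new IllegalStateException("No volume with storageId " + storageId);
    }
    executor.shutdown();
  }

  synchronized void execute(VolumeLike volume, Runnable task) {
    ExecutorService executor = executors.get(volume.getStorageID());
    if (executor == null) {
      throw new IllegalStateException("Unknown volume " + volume.getStorageID());
    }
    executor.execute(task);
  }

  public static void main(String[] args) throws InterruptedException {
    PerVolumeExecutors service = new PerVolumeExecutors();
    VolumeLike v1 = () -> "DS-0001";
    service.addVolume(v1);
    service.execute(v1,
        () -> System.out.println("task ran on " + Thread.currentThread().getName()));
    Thread.sleep(100);
    service.removeVolume("DS-0001");
  }
}
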
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 26a2e9f..fd747bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -361,20 +361,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
       Collection<StorageLocation> dataLocations, DataStorage storage) {
-    Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
+    Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(
         dataLocations.size());
     for (StorageLocation sl: dataLocations) {
-      failedLocationSet.add(sl.getFile().getAbsolutePath());
+      LOG.info("Adding to failedLocationSet " + sl);
+      failedLocationSet.add(sl);
     }
     for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
          it.hasNext(); ) {
       Storage.StorageDirectory sd = it.next();
-      failedLocationSet.remove(sd.getRoot().getAbsolutePath());
+      failedLocationSet.remove(sd.getStorageLocation());
+      LOG.info("Removing from failedLocationSet " + sd.getStorageLocation());
     }
     List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
         failedLocationSet.size());
     long failureDate = Time.now();
-    for (String failedStorageLocation: failedLocationSet) {
+    for (StorageLocation failedStorageLocation: failedLocationSet) {
       volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
           failureDate));
     }
@@ -403,49 +405,55 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           new DatanodeStorage(sd.getStorageUuid(),
               DatanodeStorage.State.NORMAL,
               storageType));
-      asyncDiskService.addVolume(sd.getCurrentDir());
+      asyncDiskService.addVolume((FsVolumeImpl) ref.getVolume());
       volumes.addVolume(ref);
     }
   }
 
   private void addVolume(Collection<StorageLocation> dataLocations,
       Storage.StorageDirectory sd) throws IOException {
-    final File dir = sd.getCurrentDir();
-    final StorageType storageType =
-        getStorageTypeFromLocations(dataLocations, sd.getRoot());
+    final StorageLocation storageLocation = sd.getStorageLocation();
 
     // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
     // nothing needed to be rolled back to make various data structures, e.g.,
     // storageMap and asyncDiskService, consistent.
-    FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+    FsVolumeImpl fsVolume = new FsVolumeImplBuilder()
+                              .setDataset(this)
+                              .setStorageID(sd.getStorageUuid())
+                              .setStorageDirectory(sd)
+                              .setConf(this.conf)
+                              .build();
     FsVolumeReference ref = fsVolume.obtainReference();
     ReplicaMap tempVolumeMap = new ReplicaMap(datasetLock);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
 
-    activateVolume(tempVolumeMap, sd, storageType, ref);
-    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+    activateVolume(tempVolumeMap, sd, storageLocation.getStorageType(), ref);
+    LOG.info("Added volume - " + storageLocation + ", StorageType: " +
+        storageLocation.getStorageType());
   }
 
   @VisibleForTesting
-  public FsVolumeImpl createFsVolume(String storageUuid, File currentDir,
-      StorageType storageType) throws IOException {
-    return new FsVolumeImpl(this, storageUuid, currentDir, conf, storageType);
+  public FsVolumeImpl createFsVolume(String storageUuid,
+      Storage.StorageDirectory sd,
+      final StorageLocation location) throws IOException {
+    return new FsVolumeImplBuilder()
+        .setDataset(this)
+        .setStorageID(storageUuid)
+        .setStorageDirectory(sd)
+        .setConf(conf)
+        .build();
   }
 
   @Override
   public void addVolume(final StorageLocation location,
       final List<NamespaceInfo> nsInfos)
       throws IOException {
-    final File dir = location.getFile();
-
     // Prepare volume in DataStorage
     final DataStorage.VolumeBuilder builder;
     try {
-      builder = dataStorage.prepareVolume(datanode, location.getFile(), nsInfos);
+      builder = dataStorage.prepareVolume(datanode, location, nsInfos);
     } catch (IOException e) {
-      volumes.addVolumeFailureInfo(new VolumeFailureInfo(
-          location.getFile().getAbsolutePath(), Time.now()));
+      volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
       throw e;
     }
 
@@ -453,7 +461,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume =
-        createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
+        createFsVolume(sd.getStorageUuid(), sd, location);
     final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -482,34 +490,33 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     builder.build();
     activateVolume(tempVolumeMap, sd, storageType, ref);
-    LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+    LOG.info("Added volume - " + location + ", StorageType: " + storageType);
   }
 
   /**
    * Removes a set of volumes from FsDataset.
-   * @param volumesToRemove a set of absolute root path of each volume.
+   * @param storageLocationsToRemove a set of
+   * {@link StorageLocation}s for each volume.
    * @param clearFailure set true to clear failure information.
    */
   @Override
-  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
-    // Make sure that all volumes are absolute path.
-    for (File vol : volumesToRemove) {
-      Preconditions.checkArgument(vol.isAbsolute(),
-          String.format("%s is not absolute path.", vol.getPath()));
-    }
-
+  public void removeVolumes(
+      Collection<StorageLocation> storageLocationsToRemove,
+      boolean clearFailure) {
     Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
     List<String> storageToRemove = new ArrayList<>();
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
         Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-        final File absRoot = sd.getRoot().getAbsoluteFile();
-        if (volumesToRemove.contains(absRoot)) {
-          LOG.info("Removing " + absRoot + " from FsDataset.");
-
+        final StorageLocation sdLocation = sd.getStorageLocation();
+        LOG.info("Checking removing StorageLocation " +
+            sdLocation + " with id " + sd.getStorageUuid());
+        if (storageLocationsToRemove.contains(sdLocation)) {
+          LOG.info("Removing StorageLocation " + sdLocation + " with id " +
+              sd.getStorageUuid() + " from FsDataset.");
           // Disable the volume from the service.
-          asyncDiskService.removeVolume(sd.getCurrentDir());
-          volumes.removeVolume(absRoot, clearFailure);
+          asyncDiskService.removeVolume(sd.getStorageUuid());
+          volumes.removeVolume(sdLocation, clearFailure);
           volumes.waitVolumeRemoved(5000, datasetLockCondition);
 
           // Removed all replica information for the blocks on the volume.
@@ -517,12 +524,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           // not scan disks.
           for (String bpid : volumeMap.getBlockPoolList()) {
             List<ReplicaInfo> blocks = new ArrayList<>();
-            for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-                 it.hasNext(); ) {
+            for (Iterator<ReplicaInfo> it =
+                  volumeMap.replicas(bpid).iterator(); it.hasNext();) {
               ReplicaInfo block = it.next();
-              final File absBasePath =
-                  new File(block.getVolume().getBasePath()).getAbsoluteFile();
-              if (absBasePath.equals(absRoot)) {
+              final StorageLocation blockStorageLocation =
+                  block.getVolume().getStorageLocation();
+              LOG.info("checking for block " + block.getBlockId() +
+                  " with storageLocation " + blockStorageLocation);
+              if (blockStorageLocation.equals(sdLocation)) {
                 blocks.add(block);
                 it.remove();
               }
@@ -625,7 +634,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     List<String> failedStorageLocations = Lists.newArrayListWithCapacity(
         infos.length);
     for (VolumeFailureInfo info: infos) {
-      failedStorageLocations.add(info.getFailedStorageLocation());
+      failedStorageLocations.add(
+          info.getFailedStorageLocation().getFile().getAbsolutePath());
     }
     return failedStorageLocations.toArray(
         new String[failedStorageLocations.size()]);
@@ -663,7 +673,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     long lastVolumeFailureDate = 0;
     long estimatedCapacityLostTotal = 0;
     for (VolumeFailureInfo info: infos) {
-      failedStorageLocations.add(info.getFailedStorageLocation());
+      failedStorageLocations.add(
+          info.getFailedStorageLocation().getFile().getAbsolutePath());
       long failureDate = info.getFailureDate();
       if (failureDate > lastVolumeFailureDate) {
         lastVolumeFailureDate = failureDate;
@@ -960,25 +971,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
     // Copy files to temp dir first
-    File[] blockFiles = copyBlockFiles(block.getBlockId(),
-        block.getGenerationStamp(), replicaInfo,
-        targetVolume.getTmpDir(block.getBlockPoolId()),
-        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
-
-    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
-        .setBlockId(replicaInfo.getBlockId())
-        .setGenerationStamp(replicaInfo.getGenerationStamp())
-        .setFsVolume(targetVolume)
-        .setDirectoryToUse(blockFiles[0].getParentFile())
-        .setBytesToReserve(0)
-        .build();
-    newReplicaInfo.setNumBytes(blockFiles[1].length());
+    ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block,
+        replicaInfo, smallBufferSize, conf);
+
     // Finalize the copied files
     newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       // Increment numBlocks here as this block moved without knowing to BPS
       FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
-      volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
+      volume.incrNumBlocks(block.getBlockPoolId());
     }
 
     removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId());
@@ -2072,7 +2073,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * @return the failed volumes. Returns null if no volume failed.
    */
   @Override // FsDatasetSpi
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
    return volumes.checkDirs();
   }
     
@@ -2250,9 +2251,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
                     .setFsVolume(vol)
                     .setDirectoryToUse(diskFile.getParentFile())
                     .build();
-              ((FsVolumeImpl) vol).getBlockPoolSlice(bpid)
-                  .resolveDuplicateReplicas(
-                      memBlockInfo, diskBlockInfo, volumeMap);
+              ((FsVolumeImpl) vol).resolveDuplicateReplicas(bpid,
+                  memBlockInfo, diskBlockInfo, volumeMap);
             }
           } else {
             if (!diskFile.delete()) {
@@ -2803,15 +2803,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     // Add thread for DISK volume if RamDisk is configured
     if (ramDiskConfigured &&
         asyncLazyPersistService != null &&
-        !asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
-      asyncLazyPersistService.addVolume(v.getCurrentDir());
+        !asyncLazyPersistService.queryVolume(v)) {
+      asyncLazyPersistService.addVolume(v);
     }
 
     // Remove thread for DISK volume if RamDisk is not configured
     if (!ramDiskConfigured &&
         asyncLazyPersistService != null &&
-        asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
-      asyncLazyPersistService.removeVolume(v.getCurrentDir());
+        asyncLazyPersistService.queryVolume(v)) {
+      asyncLazyPersistService.removeVolume(v);
     }
   }
 
@@ -2946,11 +2946,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
           // Move the replica from lazyPersist/ to finalized/ on
           // the target volume
-          BlockPoolSlice bpSlice =
-              replicaState.getLazyPersistVolume().getBlockPoolSlice(bpid);
-
           newReplicaInfo =
-              bpSlice.activateSavedReplica(replicaInfo, replicaState);
+              replicaState.getLazyPersistVolume().activateSavedReplica(bpid,
+                  replicaInfo, replicaState);
 
           // Update the volumeMap entry.
           volumeMap.add(bpid, newReplicaInfo);


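Among other things, the FsDatasetImpl changes above make getInitialVolumeFailureInfos compute failures as a set difference over StorageLocation objects rather than over absolute path strings. A toy version of that computation, using plain strings as stand-ins for StorageLocation and made-up directory names, looks like this.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch: everything that was configured but did not come up as a loaded
// storage directory is treated as an initially failed volume.
public class FailedLocationSketch {
  static Set<String> initialFailures(List<String> configured, List<String> loaded) {
    Set<String> failed = new HashSet<>(configured);
    failed.removeAll(loaded);   // whatever survived loading is not failed
    return failed;
  }

  public static void main(String[] args) {
    List<String> configured = Arrays.asList("/data/d1", "/data/d2", "/data/d3");
    List<String> loaded = Arrays.asList("/data/d1", "/data/d3");
    System.out.println(initialFailures(configured, loaded));   // [/data/d2]
  }
}
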


[22/51] [abbrv] hadoop git commit: Merge branch 'HADOOP-12756' into trunk

Posted by ae...@apache.org.
Merge branch 'HADOOP-12756' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/669d6f13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/669d6f13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/669d6f13

Branch: refs/heads/HDFS-7240
Commit: 669d6f13ec48a90d4ba7e4ed1dd0e9687580f8f3
Parents: c874fa9 c31b5e6
Author: Kai Zheng <ka...@intel.com>
Authored: Tue Oct 11 03:22:11 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Tue Oct 11 03:22:11 2016 +0600

----------------------------------------------------------------------
 .gitignore                                      |   2 +
 hadoop-project/pom.xml                          |  22 +
 .../dev-support/findbugs-exclude.xml            |  18 +
 hadoop-tools/hadoop-aliyun/pom.xml              | 154 +++++
 .../aliyun/oss/AliyunCredentialsProvider.java   |  87 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      | 580 +++++++++++++++++++
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 516 +++++++++++++++++
 .../fs/aliyun/oss/AliyunOSSInputStream.java     | 260 +++++++++
 .../fs/aliyun/oss/AliyunOSSOutputStream.java    | 111 ++++
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java    | 167 ++++++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  | 113 ++++
 .../hadoop/fs/aliyun/oss/package-info.java      |  22 +
 .../site/markdown/tools/hadoop-aliyun/index.md  | 294 ++++++++++
 .../fs/aliyun/oss/AliyunOSSTestUtils.java       |  77 +++
 .../fs/aliyun/oss/TestAliyunCredentials.java    |  78 +++
 .../oss/TestAliyunOSSFileSystemContract.java    | 239 ++++++++
 .../oss/TestAliyunOSSFileSystemStore.java       | 125 ++++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java | 145 +++++
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 +++
 .../aliyun/oss/contract/AliyunOSSContract.java  |  49 ++
 .../contract/TestAliyunOSSContractCreate.java   |  35 ++
 .../contract/TestAliyunOSSContractDelete.java   |  34 ++
 .../contract/TestAliyunOSSContractDistCp.java   |  44 ++
 .../TestAliyunOSSContractGetFileStatus.java     |  35 ++
 .../contract/TestAliyunOSSContractMkdir.java    |  34 ++
 .../oss/contract/TestAliyunOSSContractOpen.java |  34 ++
 .../contract/TestAliyunOSSContractRename.java   |  35 ++
 .../contract/TestAliyunOSSContractRootDir.java  |  69 +++
 .../oss/contract/TestAliyunOSSContractSeek.java |  34 ++
 .../src/test/resources/contract/aliyun-oss.xml  | 115 ++++
 .../src/test/resources/core-site.xml            |  46 ++
 .../src/test/resources/log4j.properties         |  23 +
 hadoop-tools/hadoop-tools-dist/pom.xml          |   6 +
 hadoop-tools/pom.xml                            |   1 +
 34 files changed, 3695 insertions(+)
----------------------------------------------------------------------





[43/51] [abbrv] hadoop git commit: HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

Posted by ae...@apache.org.
HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b371c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b371c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b371c563

Branch: refs/heads/HDFS-7240
Commit: b371c56365c14bbab0f5cdfffc0becaabfde8145
Parents: 1291254
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Oct 13 10:26:07 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Oct 13 10:26:07 2016 -0700

----------------------------------------------------------------------
 .../server/diskbalancer/TestDiskBalancer.java   | 44 +++++---------------
 1 file changed, 11 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b371c563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index d911e74..9985210 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
+        .setCapacities(new long[] {cap, cap})
         .build();
     try {
       DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
-
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
+        .setCapacities(new long[] {cap, cap, cap})
         .build();
 
-
     try {
       DataMover dataMover = new DataMover(cluster, dataNodeIndex,
           sourceDiskIndex, conf, blockSize, blockCount);
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
+        .setCapacities(new long[] {cap, cap})
         .build();
 
     try {
@@ -246,24 +249,6 @@ public class TestDiskBalancer {
   }
 
   /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size    - new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
-                                 String diskType) {
-    Preconditions.checkNotNull(cluster);
-    for (DiskBalancerDataNode node : cluster.getNodes()) {
-      for (DiskBalancerVolume vol :
-          node.getVolumeSets().get(diskType).getVolumes()) {
-        vol.setCapacity(size);
-      }
-      node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-    }
-  }
-
-  /**
    * Helper class that allows us to create different kinds of MiniDFSClusters
    * and populate data.
    */
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
     private int fileLen;
     private int blockCount;
     private int diskCount;
+    private long[] capacities;
 
     public ClusterBuilder setConf(Configuration conf) {
       this.conf = conf;
@@ -300,13 +286,9 @@ public class TestDiskBalancer {
       return this;
     }
 
-    private long[] getCapacities(int diskCount, int bSize, int fSize) {
-      Preconditions.checkState(diskCount > 0);
-      long[] capacities = new long[diskCount];
-      for (int x = 0; x < diskCount; x++) {
-        capacities[x] = diskCount * bSize * fSize * 2L;
-      }
-      return capacities;
+    private ClusterBuilder setCapacities(final long[] caps) {
+      this.capacities = caps;
+      return this;
     }
 
     private StorageType[] getStorageTypes(int diskCount) {
@@ -338,7 +320,7 @@ public class TestDiskBalancer {
       // Write a file and restart the cluster
       MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
           .numDataNodes(numDatanodes)
-          .storageCapacities(getCapacities(diskCount, blockSize, fileLen))
+          .storageCapacities(capacities)
           .storageTypes(getStorageTypes(diskCount))
           .storagesPerDatanode(diskCount)
           .build();
@@ -447,10 +429,6 @@ public class TestDiskBalancer {
       diskBalancerCluster.readClusterInfo();
       List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();
 
-      // Rewrite the capacity in the model to show that disks need
-      // re-balancing.
-      setVolumeCapacity(diskBalancerCluster, blockSize * 2L * blockCount,
-          "DISK");
       // Pick a node to process.
       nodesToProcess.add(diskBalancerCluster.getNodeByUUID(
           node.getDatanodeUuid()));


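With setVolumeCapacity gone, the tests above size every simulated disk up front as blockSize * 2L * blockCount and hand the array to the cluster builder. A tiny sketch of that arithmetic with illustrative numbers follows; the real tests derive blockSize and blockCount from their own configuration.

import java.util.Arrays;

// Sketch: build an explicit per-disk capacity array so each simulated disk
// can hold twice the data the test will write.  The constants are examples.
public class CapacitySketch {
  public static void main(String[] args) {
    final long blockSize = 1024 * 1024;   // 1 MB
    final int blockCount = 100;
    final int diskCount = 2;

    final long cap = blockSize * 2L * blockCount;
    final long[] capacities = new long[diskCount];
    Arrays.fill(capacities, cap);

    System.out.println(Arrays.toString(capacities));   // [209715200, 209715200]
  }
}
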


[02/51] [abbrv] hadoop git commit: HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.

Posted by ae...@apache.org.
HDFS-10933. Refactor TestFsck. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3059b251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3059b251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3059b251

Branch: refs/heads/HDFS-7240
Commit: 3059b251d8f37456c5761ecaf73fe6c0c5a59067
Parents: be3cb10
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Oct 7 10:17:50 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Oct 7 10:17:50 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 2482 ++++++++----------
 1 file changed, 1152 insertions(+), 1330 deletions(-)
----------------------------------------------------------------------





[29/51] [abbrv] hadoop git commit: HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.

Posted by ae...@apache.org.
HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b32b142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b32b142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b32b142

Branch: refs/heads/HDFS-7240
Commit: 4b32b1420d98ea23460d05ae94f2698109b3d6f7
Parents: 2fb392a
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Oct 11 13:21:33 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Oct 11 13:21:33 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/compress/SnappyCodec.java  | 30 +++++++++++---------
 1 file changed, 16 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b32b142/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 2a9c5d0..20a4cd6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,20 +60,22 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    * Are the native snappy libraries loaded & initialized?
    */
   public static void checkNativeCodeLoaded() {
-      if (!NativeCodeLoader.isNativeCodeLoaded() ||
-          !NativeCodeLoader.buildSupportsSnappy()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "this version of libhadoop was built without " +
-            "snappy support.");
-      }
-      if (!SnappyCompressor.isNativeCodeLoaded()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "SnappyCompressor has not been loaded.");
-      }
-      if (!SnappyDecompressor.isNativeCodeLoaded()) {
-        throw new RuntimeException("native snappy library not available: " +
-            "SnappyDecompressor has not been loaded.");
-      }
+    if (!NativeCodeLoader.buildSupportsSnappy()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "this version of libhadoop was built without " +
+          "snappy support.");
+    }
+    if (!NativeCodeLoader.isNativeCodeLoaded()) {
+      throw new RuntimeException("Failed to load libhadoop.");
+    }
+    if (!SnappyCompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "SnappyCompressor has not been loaded.");
+    }
+    if (!SnappyDecompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native snappy library not available: " +
+          "SnappyDecompressor has not been loaded.");
+    }
   }
   
   public static boolean isNativeCodeLoaded() {


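The SnappyCodec patch above replaces one combined condition with a sequence of independent checks so that each failure mode yields its own message. A generic sketch of that shape is below; the BooleanSupplier arguments are stand-ins for the real NativeCodeLoader and Snappy compressor probes, not the actual Hadoop calls.

import java.util.function.BooleanSupplier;

// Sketch: run the checks in order and fail with the message that matches the
// first unsatisfied precondition, instead of collapsing them into one test.
public class OrderedChecksSketch {
  static void check(String message, BooleanSupplier ok) {
    if (!ok.getAsBoolean()) {
      throw new RuntimeException(message);
    }
  }

  static void checkNativeCodeLoaded(BooleanSupplier buildSupportsSnappy,
                                    BooleanSupplier nativeCodeLoaded,
                                    BooleanSupplier compressorLoaded,
                                    BooleanSupplier decompressorLoaded) {
    check("native snappy library not available: built without snappy support",
        buildSupportsSnappy);
    check("Failed to load libhadoop.", nativeCodeLoaded);
    check("SnappyCompressor has not been loaded.", compressorLoaded);
    check("SnappyDecompressor has not been loaded.", decompressorLoaded);
  }

  public static void main(String[] args) {
    // Simulate: built with snappy support, but libhadoop itself failed to load.
    try {
      checkNativeCodeLoaded(() -> true, () -> false, () -> false, () -> false);
    } catch (RuntimeException e) {
      System.err.println(e.getMessage());   // "Failed to load libhadoop."
    }
  }
}
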


[42/51] [abbrv] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

Posted by ae...@apache.org.
HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12912540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12912540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12912540

Branch: refs/heads/HDFS-7240
Commit: 129125404244f35ee63b8f0491a095371685e9ba
Parents: 9454dc5
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Thu Oct 13 22:05:00 2016 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +++++++++----------
 2 files changed, 51 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("Balancer bandwidth is " + bandwidth
           + " bytes per second.");
     } catch (IOException ioe) {
-      System.err.println("Datanode unreachable.");
-      return -1;
+      throw new IOException("Datanode unreachable. " + ioe, ioe);
     }
     return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
       dnProxy.evictWriters();
       System.out.println("Requested writer eviction to datanode " + dn);
     } catch (IOException ioe) {
-      return -1;
+      throw new IOException("Datanode unreachable. " + ioe, ioe);
     }
     return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
       DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
       System.out.println(dnInfo.getDatanodeLocalReport());
     } catch (IOException ioe) {
-      System.err.println("Datanode unreachable.");
-      return -1;
+      throw new IOException("Datanode unreachable. " + ioe, ioe);
     }
     return 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
     restartCluster();
 
     admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
     if (cluster != null) {
       cluster.shutdown();
     }
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     datanode = cluster.getDataNodes().get(0);
     namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
   @Test(timeout = 30000)
   public void testGetDatanodeInfo() throws Exception {
     redirectStream();
-    final Configuration dfsConf = new HdfsConfiguration();
-    final int numDn = 2;
-
-    /* init cluster */
-    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-        .numDataNodes(numDn).build()) {
-
-      miniCluster.waitActive();
-      assertEquals(numDn, miniCluster.getDataNodes().size());
-      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-      /* init reused vars */
-      List<String> outs = null;
-      int ret;
-
-      /**
-       * test erroneous run
-       */
+    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
       resetStream();
-      outs = Lists.newArrayList();
-
-      /* invoke getDatanodeInfo */
-      ret = ToolRunner.run(
-          dfsAdmin,
-          new String[] {"-getDatanodeInfo", "128.0.0.1:1234"});
+      final DataNode dn = cluster.getDataNodes().get(i);
+      final String addr = String.format(
+          "%s:%d",
+          dn.getXferAddress().getHostString(),
+          dn.getIpcPort());
+      final int ret = ToolRunner.run(dfsAdmin,
+          new String[]{"-getDatanodeInfo", addr});
+      assertEquals(0, ret);
 
       /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
       scanIntoList(out, outs);
-
       /* verify results */
+      assertEquals(
+          "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
+              + " Config version: core-x.y.z,hdfs-x",
+          1, outs.size());
+      assertThat(outs.get(0),
+          is(allOf(containsString("Uptime:"),
+              containsString("Software version"),
+              containsString("Config version"))));
+    }
+  }
+
+  /**
+   * Test that if datanode is not reachable, some DFSAdmin commands will fail
+   * elegantly with non-zero ret error code along with exception error message.
+   */
+  @Test(timeout = 60000)
+  public void testDFSAdminUnreachableDatanode() throws Exception {
+    redirectStream();
+    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
+    for (String command : new String[]{"-getDatanodeInfo",
+        "-evictWriters", "-getBalancerBandwidth"}) {
+      // Connecting to Xfer port instead of IPC port will get
+      // Datanode unreachable. java.io.EOFException
+      final String dnDataAddr = datanode.getXferAddress().getHostString() + ":"
+          + datanode.getXferPort();
+      resetStream();
+      final List<String> outs = Lists.newArrayList();
+      final int ret = ToolRunner.run(dfsAdmin,
+          new String[]{command, dnDataAddr});
       assertEquals(-1, ret);
-      assertTrue("Unexpected getDatanodeInfo stdout", outs.isEmpty());
-
-      /**
-       * test normal run
-       */
-      for (int i = 0; i < numDn; i++) {
-        resetStream();
-        final DataNode dn = miniCluster.getDataNodes().get(i);
-
-        /* invoke getDatanodeInfo */
-        final String addr = String.format(
-            "%s:%d",
-            dn.getXferAddress().getHostString(),
-            dn.getIpcPort());
-        ret = ToolRunner.run(
-            dfsAdmin,
-            new String[] {"-getDatanodeInfo", addr});
-
-        /* collect outputs */
-        outs = Lists.newArrayList();
-        scanIntoList(out, outs);
-
-        /* verify results */
-        assertEquals(0, ret);
-        assertEquals(
-            "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
-                + " Config version: core-x.y.z,hdfs-x",
-            1, outs.size());
-        assertThat(outs.get(0),
-            is(allOf(containsString("Uptime:"),
-                containsString("Software version"),
-                containsString("Config version"))));
-      }
+
+      scanIntoList(out, outs);
+      assertTrue("Unexpected " + command + " stdout: " + out, outs.isEmpty());
+      assertTrue("Unexpected " + command + " stderr: " + err,
+          err.toString().contains("Exception"));
     }
   }
 


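The DFSAdmin change above stops swallowing the root cause: instead of printing a generic "Datanode unreachable." and returning -1, the commands wrap the IOException with context and rethrow, so the caller still exits non-zero while the detailed error reaches stderr. A small stand-alone sketch of that wrap-and-rethrow pattern follows; fetchDatanodeInfo and the address are invented for the demo.

import java.io.IOException;

// Sketch: keep the original exception as both cause and message text, and let
// the outermost caller decide the exit code.
public class WrapAndRethrowSketch {
  static String fetchDatanodeInfo(String addr) throws IOException {
    throw new IOException("EOFException talking to " + addr);   // simulated failure
  }

  static int getDatanodeInfo(String addr) throws IOException {
    try {
      System.out.println(fetchDatanodeInfo(addr));
    } catch (IOException ioe) {
      throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
  }

  public static void main(String[] args) {
    try {
      System.exit(getDatanodeInfo("127.0.0.1:9866"));
    } catch (IOException e) {
      System.err.println(e);   // detailed cause, not just a generic message
      System.exit(-1);
    }
  }
}
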


[28/51] [abbrv] hadoop git commit: HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu

Posted by ae...@apache.org.
HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fb392a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fb392a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fb392a5

Branch: refs/heads/HDFS-7240
Commit: 2fb392a587d288b628936ca6d18fabad04afc585
Parents: 809cfd2
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Oct 7 14:05:40 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 10:57:08 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/log/LogLevel.java       | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fb392a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4fa839f..79eae12 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -47,15 +47,17 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * Change log level in runtime.
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUsage: General options are:\n"
+  public static final String USAGES = "\nUsage: Command options are:\n"
       + "\t[-getlevel <host:port> <classname> [-protocol (http|https)]\n"
       + "\t[-setlevel <host:port> <classname> <level> "
       + "[-protocol (http|https)]\n";
@@ -67,7 +69,7 @@ public class LogLevel {
    */
   public static void main(String[] args) throws Exception {
     CLI cli = new CLI(new Configuration());
-    System.exit(cli.run(args));
+    System.exit(ToolRunner.run(cli, args));
   }
 
   /**
@@ -81,6 +83,7 @@ public class LogLevel {
 
   private static void printUsage() {
     System.err.println(USAGES);
+    GenericOptionsParser.printGenericCommandUsage(System.err);
   }
 
   public static boolean isValidProtocol(String protocol) {
@@ -107,7 +110,7 @@ public class LogLevel {
         sendLogLevelRequest();
       } catch (HadoopIllegalArgumentException e) {
         printUsage();
-        throw e;
+        return -1;
       }
       return 0;
     }
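
The shape the fix settles on — print the usage text plus the generic options and return -1 instead of rethrowing, with ToolRunner propagating the return value as the process exit code — generalizes to any Tool-based CLI. A minimal sketch follows; ExampleCli and its usage string are placeholders, not LogLevel itself, while Tool, ToolRunner and GenericOptionsParser.printGenericCommandUsage are the same Hadoop utilities used in the patch.

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hedged sketch of the CLI pattern adopted by the fix: invalid arguments print
// usage and yield exit code -1 via ToolRunner, rather than an uncaught exception.
public class ExampleCli extends Configured implements Tool {
  private static void printUsage() {
    System.err.println("\nUsage: ExampleCli -getlevel <host:port> <classname>");
    // Also print the generic -conf/-D/-fs options that ToolRunner parses.
    GenericOptionsParser.printGenericCommandUsage(System.err);
  }

  @Override
  public int run(String[] args) throws Exception {
    if (args.length == 0) {
      // Bad or missing arguments: report usage and fail with a non-zero code
      // instead of throwing, so scripts see a clean exit status.
      printUsage();
      return -1;
    }
    System.out.println("would process: " + String.join(" ", args));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner strips the generic options and hands the rest to run().
    System.exit(ToolRunner.run(new ExampleCli(), args));
  }
}

Invoked with no arguments, the sketch prints the usage text and the generic options to stderr and exits with a non-zero status, which is the behavior HADOOP-13697 asks of LogLevel.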




[50/51] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d70e57a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d70e57a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d70e57a

Branch: refs/heads/HDFS-7240
Commit: 7d70e57a137622043033d37e16d478e9bc98d60d
Parents: ef84ac4 0a85d07
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Oct 13 15:15:30 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Oct 13 15:15:30 2016 -0700

----------------------------------------------------------------------
 .gitignore                                      |    2 +
 .../server/KerberosAuthenticationHandler.java   |    7 +-
 .../util/RolloverSignerSecretProvider.java      |    2 +-
 .../util/TestZKSignerSecretProvider.java        |  221 +-
 .../dev-support/findbugsExcludeFile.xml         |    5 +
 hadoop-common-project/hadoop-common/pom.xml     |    1 +
 .../org/apache/hadoop/conf/ConfServlet.java     |   19 +-
 .../org/apache/hadoop/conf/Configuration.java   |  307 ++-
 .../apache/hadoop/fs/CachingGetSpaceUsed.java   |    3 +-
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java |   48 +
 .../src/main/java/org/apache/hadoop/fs/DU.java  |    8 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java    |   21 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   10 +-
 .../java/org/apache/hadoop/fs/TrashPolicy.java  |   36 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java    |   15 +
 .../apache/hadoop/fs/permission/AclEntry.java   |   24 +-
 .../hadoop/fs/permission/AclEntryScope.java     |    2 +-
 .../hadoop/fs/permission/AclEntryType.java      |   23 +-
 .../apache/hadoop/fs/permission/AclStatus.java  |    2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |    6 +-
 .../apache/hadoop/io/compress/SnappyCodec.java  |   30 +-
 .../org/apache/hadoop/ipc/ExternalCall.java     |   88 +
 .../main/java/org/apache/hadoop/ipc/Server.java |   63 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java    |    5 +-
 .../java/org/apache/hadoop/log/LogLevel.java    |    9 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |    2 +-
 .../org/apache/hadoop/security/Credentials.java |    8 +-
 .../hadoop/security/KerberosAuthException.java  |  118 +
 .../hadoop/security/UGIExceptionMessages.java   |   46 +
 .../hadoop/security/UserGroupInformation.java   |  203 +-
 .../org/apache/hadoop/security/token/Token.java |   60 +-
 .../src/main/resources/core-default.xml         |    6 +-
 .../src/site/markdown/CommandsManual.md         |    4 +-
 .../src/site/markdown/FileSystemShell.md        |    3 +-
 .../src/site/markdown/filesystem/filesystem.md  |   77 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |  122 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  164 +-
 .../apache/hadoop/fs/FileContextURIBase.java    |    4 +-
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java    |   75 +
 .../hadoop/fs/TestFileSystemInitialization.java |   12 +-
 .../java/org/apache/hadoop/fs/TestTrash.java    |    4 +
 .../AbstractContractRootDirectoryTest.java      |   34 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   39 +
 .../hadoop/ha/TestZKFailoverController.java     |   34 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |   87 +
 .../security/TestUserGroupInformation.java      |   33 +-
 .../hadoop/crypto/key/kms/server/KMS.java       |  665 +++--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |    4 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |   30 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   21 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |   88 +-
 .../hdfs/web/resources/AclPermissionParam.java  |   23 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |    1 -
 .../hadoop/fs/http/server/FSOperations.java     |    9 +-
 .../service/hadoop/FileSystemAccessService.java |    6 +-
 .../src/main/native/libhdfs/include/hdfs/hdfs.h |    1 +
 .../src/contrib/bkjournal/README.txt            |   66 -
 .../dev-support/findbugsExcludeFile.xml         |    5 -
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |  175 --
 .../bkjournal/BookKeeperEditLogInputStream.java |  264 --
 .../BookKeeperEditLogOutputStream.java          |  188 --
 .../bkjournal/BookKeeperJournalManager.java     |  893 -------
 .../contrib/bkjournal/CurrentInprogress.java    |  160 --
 .../bkjournal/EditLogLedgerMetadata.java        |  217 --
 .../hadoop/contrib/bkjournal/MaxTxId.java       |  103 -
 .../bkjournal/src/main/proto/bkjournal.proto    |   49 -
 .../hadoop/contrib/bkjournal/BKJMUtil.java      |  184 --
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |  414 ---
 .../bkjournal/TestBookKeeperConfiguration.java  |  174 --
 .../bkjournal/TestBookKeeperEditLogStreams.java |   92 -
 .../bkjournal/TestBookKeeperHACheckpoints.java  |  109 -
 .../bkjournal/TestBookKeeperJournalManager.java |  984 -------
 .../TestBookKeeperSpeculativeRead.java          |  167 --
 .../bkjournal/TestBootstrapStandbyWithBKJM.java |  170 --
 .../bkjournal/TestCurrentInprogress.java        |  160 --
 .../hdfs/server/namenode/FSEditLogTestUtil.java |   40 -
 .../src/test/resources/log4j.properties         |   55 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |    6 +
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |    5 +-
 .../server/blockmanagement/BlockManager.java    |   37 +-
 .../BlockPlacementPolicyDefault.java            |    5 +-
 .../blockmanagement/DecommissionManager.java    |   29 +-
 .../hdfs/server/common/HdfsServerConstants.java |    2 +-
 .../hadoop/hdfs/server/common/Storage.java      |   22 +
 .../server/datanode/BlockPoolSliceStorage.java  |   20 +-
 .../hdfs/server/datanode/BlockScanner.java      |    8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   34 +-
 .../hdfs/server/datanode/DataStorage.java       |   34 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  320 +--
 .../hdfs/server/datanode/DiskBalancer.java      |   25 +-
 .../hdfs/server/datanode/LocalReplica.java      |    2 +-
 .../hdfs/server/datanode/ReplicaInfo.java       |    2 +-
 .../hdfs/server/datanode/StorageLocation.java   |   32 +-
 .../hdfs/server/datanode/VolumeScanner.java     |   27 +-
 .../erasurecode/StripedBlockReader.java         |    2 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |    5 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  234 +-
 .../impl/FsDatasetAsyncDiskService.java         |   40 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  136 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  233 +-
 .../fsdataset/impl/FsVolumeImplBuilder.java     |   65 +
 .../datanode/fsdataset/impl/FsVolumeList.java   |   44 +-
 .../impl/RamDiskAsyncLazyPersistService.java    |   79 +-
 .../fsdataset/impl/VolumeFailureInfo.java       |   13 +-
 .../hdfs/server/namenode/CacheManager.java      |   12 +-
 .../ContentSummaryComputationContext.java       |   94 +-
 .../namenode/EncryptionFaultInjector.java       |    6 +
 .../server/namenode/EncryptionZoneManager.java  |   25 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |    4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |  110 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java     |   67 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |  148 +-
 .../server/namenode/FSDirErasureCodingOp.java   |    2 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |    2 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |   16 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |   22 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  281 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |    2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  163 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   27 +-
 .../hdfs/server/namenode/FSDirectory.java       |   33 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   94 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  290 +--
 .../hdfs/server/namenode/FSNamesystemLock.java  |  187 +-
 .../hadoop/hdfs/server/namenode/INode.java      |    1 +
 .../server/namenode/INodeAttributeProvider.java |   10 +-
 .../hdfs/server/namenode/INodeDirectory.java    |   11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |    1 +
 .../hdfs/server/namenode/INodeReference.java    |    2 +
 .../hdfs/server/namenode/INodeSymlink.java      |    1 +
 .../hdfs/server/namenode/INodesInPath.java      |    7 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   12 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |    6 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |   23 +-
 .../hdfs/server/namenode/XAttrStorage.java      |    7 +-
 .../ha/RequestHedgingProxyProvider.java         |   25 +-
 .../snapshot/DirectorySnapshottableFeature.java |    9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |    1 +
 .../server/namenode/top/metrics/TopMetrics.java |   67 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  150 +-
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   |   51 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   10 +-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |    2 +-
 .../src/main/resources/hdfs-default.xml         |   21 +-
 .../src/site/markdown/ExtendedAttributes.md     |    4 +-
 .../src/site/markdown/HDFSCommands.md           |    2 +-
 .../src/site/markdown/HDFSErasureCoding.md      |   26 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md     |  114 -
 .../src/site/markdown/TransparentEncryption.md  |   16 +
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md    |    2 +-
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |   17 +-
 .../hadoop/hdfs/TestBalancerBandwidth.java      |   57 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |    4 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java    | 2066 +++++++--------
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  109 +-
 .../org/apache/hadoop/hdfs/TestFileAppend4.java |    3 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |   61 +
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |    1 +
 .../blockmanagement/TestBlockTokenWithDFS.java  |    3 +-
 .../TestNameNodePrunesMissingStorages.java      |   15 +-
 ...constructStripedBlocksWithRackAwareness.java |  158 +-
 .../server/datanode/SimulatedFSDataset.java     |   46 +-
 .../hdfs/server/datanode/TestBlockScanner.java  |    3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |   15 +-
 .../datanode/TestDataNodeVolumeFailure.java     |   12 +-
 .../TestDataNodeVolumeFailureReporting.java     |   10 +
 .../server/datanode/TestDirectoryScanner.java   |   76 +-
 .../hdfs/server/datanode/TestDiskError.java     |    2 +-
 .../extdataset/ExternalDatasetImpl.java         |   10 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |   44 +-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |    9 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |   69 +-
 .../fsdataset/impl/TestFsVolumeList.java        |   83 +-
 .../server/diskbalancer/TestDiskBalancer.java   |   44 +-
 .../TestDiskBalancerWithMockMover.java          |    4 +-
 .../server/namenode/TestCacheDirectives.java    |   10 +
 .../hdfs/server/namenode/TestFSDirectory.java   |   48 +
 .../hdfs/server/namenode/TestFSNamesystem.java  |  292 ---
 .../server/namenode/TestFSNamesystemLock.java   |  317 +++
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 2403 +++++++++---------
 .../server/namenode/TestNamenodeRetryCache.java |   25 +-
 .../server/namenode/TestSnapshotPathINodes.java |   22 +
 .../ha/TestRequestHedgingProxyProvider.java     |   18 +-
 .../server/namenode/metrics/TestTopMetrics.java |   63 +
 .../snapshot/TestRenameWithSnapshots.java       |  199 ++
 .../web/resources/TestWebHdfsDataLocality.java  |   25 +-
 .../shortcircuit/TestShortCircuitCache.java     |    9 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |  166 +-
 .../src/test/resources/testCryptoConf.xml       |   90 +
 hadoop-hdfs-project/pom.xml                     |    1 -
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |   90 +-
 .../hadoop/mapreduce/v2/app/TestRecovery.java   |   66 +
 .../mapreduce/v2/app/webapp/TestAMWebApp.java   |    8 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java    |    2 +-
 .../src/main/resources/mapred-default.xml       |   10 +-
 .../apache/hadoop/mapred/JobClientUnitTest.java |   34 +-
 .../hadoop/examples/terasort/TeraGen.java       |    3 +
 .../examples/terasort/TeraOutputFormat.java     |   20 +-
 .../hadoop/examples/terasort/TeraSort.java      |    3 +
 .../plugin/versioninfo/VersionInfoMojo.java     |    2 +-
 hadoop-project-dist/pom.xml                     |   17 -
 hadoop-project/pom.xml                          |   30 +-
 .../dev-support/findbugs-exclude.xml            |   18 +
 hadoop-tools/hadoop-aliyun/pom.xml              |  154 ++
 .../aliyun/oss/AliyunCredentialsProvider.java   |   87 +
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      |  580 +++++
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |  516 ++++
 .../fs/aliyun/oss/AliyunOSSInputStream.java     |  260 ++
 .../fs/aliyun/oss/AliyunOSSOutputStream.java    |  111 +
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java    |  167 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  113 +
 .../hadoop/fs/aliyun/oss/package-info.java      |   22 +
 .../site/markdown/tools/hadoop-aliyun/index.md  |  294 +++
 .../fs/aliyun/oss/AliyunOSSTestUtils.java       |   77 +
 .../fs/aliyun/oss/TestAliyunCredentials.java    |   78 +
 .../oss/TestAliyunOSSFileSystemContract.java    |  239 ++
 .../oss/TestAliyunOSSFileSystemStore.java       |  125 +
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  145 ++
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |   91 +
 .../aliyun/oss/contract/AliyunOSSContract.java  |   49 +
 .../contract/TestAliyunOSSContractCreate.java   |   35 +
 .../contract/TestAliyunOSSContractDelete.java   |   34 +
 .../contract/TestAliyunOSSContractDistCp.java   |   44 +
 .../TestAliyunOSSContractGetFileStatus.java     |   35 +
 .../contract/TestAliyunOSSContractMkdir.java    |   34 +
 .../oss/contract/TestAliyunOSSContractOpen.java |   34 +
 .../contract/TestAliyunOSSContractRename.java   |   35 +
 .../contract/TestAliyunOSSContractRootDir.java  |   69 +
 .../oss/contract/TestAliyunOSSContractSeek.java |   34 +
 .../src/test/resources/contract/aliyun-oss.xml  |  115 +
 .../src/test/resources/core-site.xml            |   46 +
 .../src/test/resources/log4j.properties         |   23 +
 hadoop-tools/hadoop-aws/pom.xml                 |   12 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  104 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |   17 +
 .../src/site/markdown/tools/hadoop-aws/index.md |   42 +-
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |   55 -
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   |   63 +
 .../fs/s3a/TestS3AExceptionTranslation.java     |  127 +
 hadoop-tools/hadoop-azure-datalake/pom.xml      |    4 +
 ...ClientCredentialBasedAccesTokenProvider.java |    5 +-
 hadoop-tools/hadoop-azure/pom.xml               |    6 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   16 +-
 .../apache/hadoop/tools/DistCpConstants.java    |   12 +-
 .../hadoop/tools/mapred/CopyCommitter.java      |    5 +-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   |   45 +-
 .../hadoop/tools/util/DistCpTestUtils.java      |   32 +-
 hadoop-tools/hadoop-kafka/pom.xml               |    2 +-
 hadoop-tools/hadoop-openstack/pom.xml           |   18 +-
 .../swift/auth/ApiKeyAuthenticationRequest.java |    2 +-
 .../fs/swift/auth/entities/AccessToken.java     |    2 +-
 .../hadoop/fs/swift/auth/entities/Catalog.java  |    2 +-
 .../hadoop/fs/swift/auth/entities/Endpoint.java |    2 +-
 .../hadoop/fs/swift/auth/entities/Tenant.java   |    2 +-
 .../hadoop/fs/swift/auth/entities/User.java     |    2 +-
 .../snative/SwiftNativeFileSystemStore.java     |    3 +-
 .../apache/hadoop/fs/swift/util/JSONUtil.java   |   24 +-
 hadoop-tools/hadoop-rumen/pom.xml               |    9 +
 .../apache/hadoop/tools/rumen/Anonymizer.java   |   23 +-
 .../hadoop/tools/rumen/HadoopLogsAnalyzer.java  |    3 +-
 .../tools/rumen/JsonObjectMapperParser.java     |   17 +-
 .../tools/rumen/JsonObjectMapperWriter.java     |   21 +-
 .../apache/hadoop/tools/rumen/LoggedJob.java    |    2 +-
 .../hadoop/tools/rumen/LoggedLocation.java      |    2 +-
 .../tools/rumen/LoggedNetworkTopology.java      |    2 +-
 .../rumen/LoggedSingleRelativeRanking.java      |    4 +-
 .../apache/hadoop/tools/rumen/LoggedTask.java   |    2 +-
 .../hadoop/tools/rumen/LoggedTaskAttempt.java   |    2 +-
 .../hadoop/tools/rumen/datatypes/NodeName.java  |    2 +-
 .../rumen/serializers/BlockingSerializer.java   |   10 +-
 .../DefaultAnonymizingRumenSerializer.java      |    8 +-
 .../serializers/DefaultRumenSerializer.java     |    9 +-
 .../serializers/ObjectStringSerializer.java     |   10 +-
 .../apache/hadoop/tools/rumen/state/State.java  |    2 +-
 .../tools/rumen/state/StateDeserializer.java    |   14 +-
 .../hadoop/tools/rumen/state/StatePool.java     |   36 +-
 .../hadoop/tools/rumen/TestHistograms.java      |   13 +-
 hadoop-tools/hadoop-sls/pom.xml                 |    4 +
 .../hadoop/yarn/sls/RumenToSLSConverter.java    |    8 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |    7 +-
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |   10 +-
 hadoop-tools/hadoop-tools-dist/pom.xml          |    6 +
 hadoop-tools/pom.xml                            |    1 +
 .../yarn/api/records/ReservationDefinition.java |   44 +-
 .../org/apache/hadoop/yarn/api/records/URL.java |   58 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |   20 +-
 .../ReplaceLabelsOnNodeRequest.java             |    8 +
 ..._server_resourcemanager_service_protos.proto |    2 +-
 .../src/main/proto/yarn_protos.proto            |    1 +
 .../apache/hadoop/yarn/api/records/TestURL.java |   99 +
 .../yarn/client/api/impl/AMRMClientImpl.java    |   10 +-
 .../impl/ContainerManagementProtocolProxy.java  |    7 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |   39 +-
 .../TestOpportunisticContainerAllocation.java   |  398 +++
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |    3 +-
 .../impl/pb/ReservationDefinitionPBImpl.java    |   31 +
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java    |   14 +-
 .../yarn/util/ProcfsBasedProcessTree.java       |   26 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |    1 +
 .../src/main/resources/yarn-default.xml         |    4 +-
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  264 ++
 .../hadoop/yarn/api/TestPBImplRecords.java      |  259 +-
 .../yarn/util/TestProcfsBasedProcessTree.java   |   46 +-
 ...pplicationHistoryManagerOnTimelineStore.java |   14 +-
 ...pplicationHistoryManagerOnTimelineStore.java |   14 +-
 .../OpportunisticContainerAllocator.java        |   22 +-
 .../OpportunisticContainerContext.java          |   49 +-
 .../yarn/server/nodemanager/NodeManager.java    |    3 +-
 .../amrmproxy/DefaultRequestInterceptor.java    |    4 +-
 .../scheduler/DistributedScheduler.java         |   59 +-
 .../impl/container-executor.c                   |   46 +-
 .../impl/container-executor.h                   |   10 +-
 .../main/native/container-executor/impl/main.c  |  264 +-
 .../server/resourcemanager/AdminService.java    |   46 +
 .../resourcemanager/EmbeddedElectorService.java |   59 +-
 ...pportunisticContainerAllocatorAMService.java |  215 +-
 .../server/resourcemanager/RMAppManager.java    |    2 +-
 .../server/resourcemanager/RMServerUtils.java   |    5 +-
 .../server/resourcemanager/ResourceManager.java |   12 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |    2 +-
 .../scheduler/AbstractYarnScheduler.java        |  416 +--
 .../scheduler/SchedulerApplicationAttempt.java  |   66 +-
 .../scheduler/capacity/CapacityScheduler.java   | 1729 +++++++------
 .../scheduler/capacity/LeafQueue.java           |   16 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |    4 +
 .../distributed/NodeQueueLoadMonitor.java       |   45 +-
 .../scheduler/fair/FSLeafQueue.java             |    2 +-
 .../scheduler/fair/FSParentQueue.java           |    6 +-
 .../scheduler/fair/FairScheduler.java           | 1047 ++++----
 .../resourcemanager/webapp/ErrorBlock.java      |   39 +
 .../server/resourcemanager/webapp/RMWebApp.java |    1 +
 .../webapp/RedirectionErrorPage.java            |   47 +
 .../resourcemanager/webapp/RmController.java    |    4 +
 .../webapp/dao/ReservationDefinitionInfo.java   |   11 +
 ...pportunisticContainerAllocatorAMService.java |   10 +-
 .../resourcemanager/TestRMAdminService.java     |  103 +-
 .../resourcemanager/TestRMEmbeddedElector.java  |  191 ++
 .../reservation/ReservationSystemTestUtil.java  |   10 +-
 .../security/TestDelegationTokenRenewer.java    |   24 +-
 .../webapp/TestRedirectionErrorPage.java        |   68 +
 .../yarn/server/webproxy/ProxyUriUtils.java     |   53 +-
 .../server/webproxy/WebAppProxyServlet.java     |  274 +-
 .../server/webproxy/amfilter/AmIpFilter.java    |   64 +-
 .../server/webproxy/TestWebAppProxyServlet.java |   24 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |   29 +-
 .../src/site/markdown/ResourceManagerRest.md    |    3 +
 pom.xml                                         |    1 -
 348 files changed, 16384 insertions(+), 12702 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d70e57a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d70e57a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------




[16/51] [abbrv] hadoop git commit: HADOOP-13696. change hadoop-common dependency scope of jsch to provided. Contributed by Yuanbo Liu.

Posted by ae...@apache.org.
HADOOP-13696. change hadoop-common dependency scope of jsch to provided. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef61d50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef61d50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef61d50

Branch: refs/heads/HDFS-7240
Commit: cef61d505e289f074130cc3981c20f7692437cee
Parents: af50da3
Author: Steve Loughran <st...@apache.org>
Authored: Mon Oct 10 12:32:39 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Oct 10 12:32:39 2016 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/pom.xml | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef61d50/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 54d1cdd..92582ae 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -235,6 +235,7 @@
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>




[11/51] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-12756

Posted by ae...@apache.org.
Merge branch 'trunk' into HADOOP-12756


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a57bba47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a57bba47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a57bba47

Branch: refs/heads/HDFS-7240
Commit: a57bba470b396c163baef7ac9447c063180ec15b
Parents: 26d5df3 6a38d11
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 10:29:40 2016 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 10:29:40 2016 +0800

----------------------------------------------------------------------
 .../IncludePublicAnnotationsJDiffDoclet.java    |    64 +
 .../util/RolloverSignerSecretProvider.java      |     2 +-
 .../util/TestZKSignerSecretProvider.java        |   221 +-
 .../dev-support/findbugsExcludeFile.xml         |     5 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml        | 41149 ++++++-----------
 .../org/apache/hadoop/conf/ConfServlet.java     |    19 +-
 .../org/apache/hadoop/conf/Configuration.java   |   284 +-
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java |    48 +
 .../src/main/java/org/apache/hadoop/fs/DU.java  |     8 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java    |    21 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |    13 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |     6 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |    24 +-
 .../hadoop/fs/permission/AclEntryScope.java     |     2 +-
 .../hadoop/fs/permission/AclEntryType.java      |    23 +-
 .../apache/hadoop/fs/permission/AclStatus.java  |     2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |     6 +-
 .../hadoop/fs/shell/CommandWithDestination.java |     5 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java     |     2 +-
 .../java/org/apache/hadoop/io/BloomMapFile.java |    11 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |     9 +-
 .../main/java/org/apache/hadoop/io/MapFile.java |    10 +-
 .../java/org/apache/hadoop/io/SequenceFile.java |    16 +-
 .../apache/hadoop/io/compress/BZip2Codec.java   |     9 +-
 .../apache/hadoop/io/compress/DefaultCodec.java |     9 +-
 .../apache/hadoop/io/compress/GzipCodec.java    |     9 +-
 .../hadoop/io/file/tfile/Compression.java       |    14 +-
 .../org/apache/hadoop/ipc/ExternalCall.java     |    91 +
 .../main/java/org/apache/hadoop/ipc/Server.java |    88 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |     2 +-
 .../apache/hadoop/net/SocksSocketFactory.java   |     4 +-
 .../org/apache/hadoop/security/Credentials.java |     8 +-
 .../hadoop/security/KerberosAuthException.java  |   118 +
 .../hadoop/security/UGIExceptionMessages.java   |    46 +
 .../hadoop/security/UserGroupInformation.java   |   105 +-
 .../org/apache/hadoop/security/token/Token.java |    60 +-
 .../java/org/apache/hadoop/util/LineReader.java |     6 +-
 .../org/apache/hadoop/util/SysInfoWindows.java  |    58 +-
 .../java/org/apache/hadoop/util/hash/Hash.java  |     6 +-
 .../src/main/resources/core-default.xml         |     6 +-
 .../src/site/markdown/FileSystemShell.md        |     3 +-
 .../src/site/markdown/filesystem/filesystem.md  |    77 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |   122 +-
 .../apache/hadoop/conf/TestConfiguration.java   |   140 +-
 .../apache/hadoop/fs/FileContextURIBase.java    |     4 +-
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java    |    75 +
 .../hadoop/fs/TestDelegationTokenRenewer.java   |     3 +-
 .../hadoop/fs/TestFileSystemInitialization.java |    12 +-
 .../AbstractContractRootDirectoryTest.java      |    34 +-
 .../fs/contract/AbstractFSContractTestBase.java |     2 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |    48 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |    85 +
 .../org/apache/hadoop/net/ServerSocketUtil.java |    23 +
 .../security/TestUserGroupInformation.java      |    33 +-
 .../apache/hadoop/util/TestSysInfoWindows.java  |     7 +-
 .../hadoop/crypto/key/kms/server/KMS.java       |    76 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java |     2 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   |    76 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |     4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |     9 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |   146 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |    30 +
 .../hdfs/client/CreateEncryptionZoneFlag.java   |    70 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   536 +
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |    86 +
 .../apache/hadoop/hdfs/client/package-info.java |    27 +
 .../server/datanode/DiskBalancerWorkItem.java   |     2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |    88 +-
 .../hdfs/web/resources/AclPermissionParam.java  |    23 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |     1 -
 .../jdiff/Apache_Hadoop_HDFS_2.7.2.xml          | 21704 +--------
 .../src/contrib/bkjournal/README.txt            |    66 -
 .../dev-support/findbugsExcludeFile.xml         |     5 -
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |   175 -
 .../bkjournal/BookKeeperEditLogInputStream.java |   264 -
 .../BookKeeperEditLogOutputStream.java          |   188 -
 .../bkjournal/BookKeeperJournalManager.java     |   893 -
 .../contrib/bkjournal/CurrentInprogress.java    |   160 -
 .../bkjournal/EditLogLedgerMetadata.java        |   217 -
 .../hadoop/contrib/bkjournal/MaxTxId.java       |   103 -
 .../bkjournal/src/main/proto/bkjournal.proto    |    49 -
 .../hadoop/contrib/bkjournal/BKJMUtil.java      |   184 -
 .../bkjournal/TestBookKeeperAsHASharedDir.java  |   414 -
 .../bkjournal/TestBookKeeperConfiguration.java  |   174 -
 .../bkjournal/TestBookKeeperEditLogStreams.java |    92 -
 .../bkjournal/TestBookKeeperHACheckpoints.java  |   109 -
 .../bkjournal/TestBookKeeperJournalManager.java |   984 -
 .../TestBookKeeperSpeculativeRead.java          |   167 -
 .../bkjournal/TestBootstrapStandbyWithBKJM.java |   170 -
 .../bkjournal/TestCurrentInprogress.java        |   160 -
 .../hdfs/server/namenode/FSEditLogTestUtil.java |    40 -
 .../src/test/resources/log4j.properties         |    55 -
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |     5 +-
 .../hdfs/client/CreateEncryptionZoneFlag.java   |    71 -
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   524 -
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |    86 -
 .../apache/hadoop/hdfs/client/package-info.java |    27 -
 .../block/BlockPoolTokenSecretManager.java      |     3 +-
 .../token/block/BlockTokenSecretManager.java    |     6 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |     2 +-
 .../server/blockmanagement/BlockManager.java    |    70 +-
 .../BlockPlacementPolicyDefault.java            |     5 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |     5 +
 .../hdfs/server/datanode/DiskBalancer.java      |   210 +-
 .../erasurecode/StripedBlockReader.java         |     2 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |     3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |    11 +-
 .../datanode/fsdataset/impl/ReplicaMap.java     |    41 +-
 .../server/diskbalancer/planner/MoveStep.java   |     6 +-
 .../hdfs/server/namenode/CacheManager.java      |    12 +-
 .../ContentSummaryComputationContext.java       |    94 +-
 .../namenode/EncryptionFaultInjector.java       |     6 +
 .../server/namenode/EncryptionZoneManager.java  |    25 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |     4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |   122 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java     |    67 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   148 +-
 .../server/namenode/FSDirErasureCodingOp.java   |     2 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |     2 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |   200 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |    22 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   281 +-
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |     2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   163 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |    27 +-
 .../hdfs/server/namenode/FSDirectory.java       |    94 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |    94 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   244 +-
 .../hdfs/server/namenode/FSNamesystemLock.java  |   187 +-
 .../hadoop/hdfs/server/namenode/INode.java      |     1 +
 .../server/namenode/INodeAttributeProvider.java |    10 +-
 .../hdfs/server/namenode/INodeDirectory.java    |    11 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |     1 +
 .../hadoop/hdfs/server/namenode/INodeId.java    |    16 -
 .../hdfs/server/namenode/INodeReference.java    |     2 +
 .../hdfs/server/namenode/INodeSymlink.java      |     1 +
 .../hdfs/server/namenode/INodesInPath.java      |     7 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |    23 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |     2 +
 .../hdfs/server/namenode/XAttrStorage.java      |     7 +-
 .../ha/RequestHedgingProxyProvider.java         |    25 +-
 .../snapshot/DirectorySnapshottableFeature.java |     9 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |    14 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |     1 +
 .../server/protocol/RemoteEditLogManifest.java  |     7 +-
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   |    51 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |     2 +-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |     2 +-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |     2 +-
 .../src/main/resources/hdfs-default.xml         |     6 +-
 .../src/site/markdown/FaultInjectFramework.md   |   254 -
 .../src/site/markdown/HDFSCommands.md           |     8 +-
 .../src/site/markdown/HDFSDiskbalancer.md       |     5 +-
 .../src/site/markdown/HDFSErasureCoding.md      |    11 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   114 -
 .../src/site/markdown/TransparentEncryption.md  |    16 +
 .../hadoop-hdfs/src/site/markdown/ViewFs.md     |     2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md    |     2 +-
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |    17 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   103 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |     4 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java    |  2021 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java      |   742 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   109 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |    61 +
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |     1 +
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  |   119 +
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |    30 +-
 .../TestPendingInvalidateBlock.java             |    37 +-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |     2 +-
 .../fsdataset/impl/TestDatanodeRestart.java     |    13 +-
 .../impl/TestInterDatanodeProtocol.java         |     3 +-
 .../datanode/fsdataset/impl/TestReplicaMap.java |     3 +-
 .../fsdataset/impl/TestWriteToReplica.java      |     3 +-
 .../server/diskbalancer/TestDiskBalancer.java   |   156 +
 .../server/namenode/TestCacheDirectives.java    |    10 +
 .../namenode/TestDiskspaceQuotaUpdate.java      |   301 +-
 .../hdfs/server/namenode/TestFSDirectory.java   |    48 +
 .../hdfs/server/namenode/TestFSNamesystem.java  |   251 -
 .../server/namenode/TestFSNamesystemLock.java   |   317 +
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  2403 +-
 .../server/namenode/TestNameNodeMXBean.java     |    34 +-
 .../server/namenode/TestSnapshotPathINodes.java |    22 +
 .../server/namenode/ha/TestEditLogTailer.java   |    42 +-
 .../ha/TestRequestHedgingProxyProvider.java     |    18 +-
 .../snapshot/TestRenameWithSnapshots.java       |   199 +
 .../shortcircuit/TestShortCircuitCache.java     |     9 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      |    27 +-
 .../apache/hadoop/security/TestPermission.java  |   131 +-
 .../src/test/resources/testCryptoConf.xml       |    90 +
 hadoop-hdfs-project/pom.xml                     |     1 -
 .../Apache_Hadoop_MapReduce_Common_2.7.2.xml    |   727 +-
 .../Apache_Hadoop_MapReduce_Core_2.7.2.xml      |  7059 +--
 .../Apache_Hadoop_MapReduce_JobClient_2.7.2.xml |   962 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |    90 +-
 .../mapreduce/v2/app/TaskHeartbeatHandler.java  |    24 +-
 .../v2/app/rm/RMContainerAllocator.java         |    51 +-
 .../hadoop/mapreduce/v2/app/TestRecovery.java   |    66 +
 .../v2/app/TestTaskHeartbeatHandler.java        |    67 +
 .../v2/app/launcher/TestContainerLauncher.java  |    30 +
 .../app/launcher/TestContainerLauncherImpl.java |    30 +
 .../v2/app/rm/TestRMContainerAllocator.java     |    46 +
 .../mapreduce/v2/app/webapp/TestAMWebApp.java   |     8 +-
 .../hadoop-mapreduce-client-core/pom.xml        |     6 +-
 .../java/org/apache/hadoop/mapred/Master.java   |    70 +-
 .../java/org/apache/hadoop/mapred/Task.java     |     8 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java    |    11 +-
 .../hadoop/mapreduce/util/MRJobConfUtil.java    |    16 +
 .../src/main/resources/mapred-default.xml       |    10 +-
 .../org/apache/hadoop/mapred/TestMaster.java    |    56 +-
 .../mapreduce/v2/hs/HistoryFileManager.java     |    21 +
 .../hadoop/mapred/ResourceMgrDelegate.java      |     6 +
 .../apache/hadoop/mapred/JobClientUnitTest.java |    34 +-
 .../hadoop-mapreduce-client/pom.xml             |     4 +-
 .../maven/plugin/cmakebuilder/CompileMojo.java  |     4 +-
 .../maven/plugin/cmakebuilder/TestMojo.java     |     4 +-
 .../hadoop/maven/plugin/protoc/ProtocMojo.java  |     4 +
 .../apache/hadoop/maven/plugin/util/Exec.java   |     6 +-
 .../plugin/versioninfo/VersionInfoMojo.java     |    55 +-
 hadoop-project-dist/pom.xml                     |    21 +-
 hadoop-project/pom.xml                          |     8 +-
 hadoop-tools/hadoop-aws/pom.xml                 |    12 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |   171 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java       |    10 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |    17 +
 .../org/apache/hadoop/fs/s3a/Statistic.java     |     4 +
 .../src/site/markdown/tools/hadoop-aws/index.md |    42 +-
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    |     8 +
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |    55 -
 .../fs/s3a/ITestS3AFileOperationCost.java       |    85 +
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   |    63 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |    13 +-
 .../fs/s3a/TestS3AExceptionTranslation.java     |   127 +
 hadoop-tools/hadoop-azure-datalake/pom.xml      |     4 +
 ...ClientCredentialBasedAccesTokenProvider.java |     5 +-
 hadoop-tools/hadoop-azure/pom.xml               |     6 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |    16 +-
 .../tools/mapred/CopyMapper_Counter.properties  |    24 +
 .../contract/AbstractContractDistCpTest.java    |     2 +-
 hadoop-tools/hadoop-kafka/pom.xml               |     2 +-
 hadoop-tools/hadoop-openstack/pom.xml           |    18 +-
 .../swift/auth/ApiKeyAuthenticationRequest.java |     2 +-
 .../fs/swift/auth/entities/AccessToken.java     |     2 +-
 .../hadoop/fs/swift/auth/entities/Catalog.java  |     2 +-
 .../hadoop/fs/swift/auth/entities/Endpoint.java |     2 +-
 .../hadoop/fs/swift/auth/entities/Tenant.java   |     2 +-
 .../hadoop/fs/swift/auth/entities/User.java     |     2 +-
 .../snative/SwiftNativeFileSystemStore.java     |     3 +-
 .../apache/hadoop/fs/swift/util/JSONUtil.java   |    24 +-
 hadoop-tools/hadoop-rumen/pom.xml               |     9 +
 .../apache/hadoop/tools/rumen/Anonymizer.java   |    23 +-
 .../hadoop/tools/rumen/HadoopLogsAnalyzer.java  |     3 +-
 .../tools/rumen/JsonObjectMapperParser.java     |    17 +-
 .../tools/rumen/JsonObjectMapperWriter.java     |    21 +-
 .../apache/hadoop/tools/rumen/LoggedJob.java    |     2 +-
 .../hadoop/tools/rumen/LoggedLocation.java      |     2 +-
 .../tools/rumen/LoggedNetworkTopology.java      |     2 +-
 .../rumen/LoggedSingleRelativeRanking.java      |     4 +-
 .../apache/hadoop/tools/rumen/LoggedTask.java   |     2 +-
 .../hadoop/tools/rumen/LoggedTaskAttempt.java   |     2 +-
 .../hadoop/tools/rumen/datatypes/NodeName.java  |     2 +-
 .../rumen/serializers/BlockingSerializer.java   |    10 +-
 .../DefaultAnonymizingRumenSerializer.java      |     8 +-
 .../serializers/DefaultRumenSerializer.java     |     9 +-
 .../serializers/ObjectStringSerializer.java     |    10 +-
 .../apache/hadoop/tools/rumen/state/State.java  |     2 +-
 .../tools/rumen/state/StateDeserializer.java    |    14 +-
 .../hadoop/tools/rumen/state/StatePool.java     |    36 +-
 .../hadoop/tools/rumen/TestHistograms.java      |    13 +-
 hadoop-tools/hadoop-sls/pom.xml                 |     4 +
 .../hadoop/yarn/sls/RumenToSLSConverter.java    |     8 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |     7 +-
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |    10 +-
 .../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml      |   530 +-
 .../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml   |   613 +-
 .../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml   |  1185 +-
 .../Apache_Hadoop_YARN_Server_Common_2.7.2.xml  |  1056 +-
 .../yarn/api/ContainerManagementProtocol.java   |    54 +
 .../api/protocolrecords/CommitResponse.java     |    42 +
 .../protocolrecords/KillApplicationRequest.java |    18 +
 .../ReInitializeContainerRequest.java           |   110 +
 .../ReInitializeContainerResponse.java          |    38 +
 .../RestartContainerResponse.java               |    38 +
 .../api/protocolrecords/RollbackResponse.java   |    42 +
 .../records/ApplicationSubmissionContext.java   |    21 +
 .../api/records/ApplicationTimeoutType.java     |    41 +
 .../yarn/api/records/ReservationDefinition.java |    44 +-
 .../org/apache/hadoop/yarn/api/records/URL.java |    58 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |    24 +-
 .../ReplaceLabelsOnNodeRequest.java             |     8 +
 .../proto/containermanagement_protocol.proto    |     6 +
 ..._server_resourcemanager_service_protos.proto |     2 +-
 .../src/main/proto/yarn_protos.proto            |    11 +
 .../src/main/proto/yarn_service_protos.proto    |    19 +
 .../apache/hadoop/yarn/api/records/TestURL.java |    99 +
 .../hadoop/yarn/client/api/YarnClient.java      |    14 +
 .../yarn/client/api/impl/AMRMClientImpl.java    |    10 +-
 .../impl/ContainerManagementProtocolProxy.java  |     7 +-
 .../yarn/client/api/impl/YarnClientImpl.java    |    35 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |    39 +-
 .../TestOpportunisticContainerAllocation.java   |   398 +
 .../yarn/client/api/impl/TestYarnClient.java    |    52 +
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |     3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     |    10 +-
 ...ContainerManagementProtocolPBClientImpl.java |    73 +
 ...ontainerManagementProtocolPBServiceImpl.java |    86 +-
 .../impl/pb/CommitResponsePBImpl.java           |    67 +
 .../impl/pb/KillApplicationRequestPBImpl.java   |    18 +
 .../pb/ReInitializeContainerRequestPBImpl.java  |   173 +
 .../pb/ReInitializeContainerResponsePBImpl.java |    68 +
 .../impl/pb/RestartContainerResponsePBImpl.java |    67 +
 .../impl/pb/RollbackResponsePBImpl.java         |    67 +
 .../pb/ApplicationSubmissionContextPBImpl.java  |    83 +
 .../records/impl/pb/ContainerStatusPBImpl.java  |     2 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |    19 +
 .../impl/pb/ReservationDefinitionPBImpl.java    |    31 +
 .../client/api/impl/TimelineClientImpl.java     |     3 +-
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java    |    14 +-
 .../yarn/util/AbstractLivelinessMonitor.java    |    32 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |     3 +-
 .../src/main/resources/yarn-default.xml         |    28 +-
 .../hadoop/yarn/TestContainerLaunchRPC.java     |    30 +
 .../yarn/TestContainerResourceIncreaseRPC.java  |    30 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |   264 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |   269 +-
 ...pplicationHistoryManagerOnTimelineStore.java |    14 +-
 ...pplicationHistoryManagerOnTimelineStore.java |    14 +-
 .../metrics/ApplicationMetricsConstants.java    |     3 +
 .../OpportunisticContainerAllocator.java        |    22 +-
 .../OpportunisticContainerContext.java          |    49 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java    |    30 +
 .../yarn/server/nodemanager/NMAuditLogger.java  |     4 +
 .../yarn/server/nodemanager/NodeManager.java    |     3 +-
 .../amrmproxy/DefaultRequestInterceptor.java    |     4 +-
 .../containermanager/ContainerManagerImpl.java  |    53 +-
 .../container/ContainerImpl.java                |    92 +-
 .../monitor/ContainersMonitorImpl.java          |    31 +-
 .../nodemanager/metrics/NodeManagerMetrics.java |    26 +
 .../scheduler/DistributedScheduler.java         |    59 +-
 .../impl/container-executor.c                   |    46 +-
 .../impl/container-executor.h                   |    10 +-
 .../main/native/container-executor/impl/main.c  |   264 +-
 .../TestContainerManagerWithLCE.java            |    12 +
 .../containermanager/TestContainerManager.java  |   103 +-
 .../monitor/TestContainersMonitor.java          |    39 +
 .../server/resourcemanager/AdminService.java    |    46 +
 .../server/resourcemanager/ClientRMService.java |    20 +-
 ...pportunisticContainerAllocatorAMService.java |   215 +-
 .../resourcemanager/RMActiveServiceContext.java |    16 +
 .../server/resourcemanager/RMAppManager.java    |     4 +
 .../server/resourcemanager/RMAuditLogger.java   |     3 +
 .../yarn/server/resourcemanager/RMContext.java  |     5 +
 .../server/resourcemanager/RMContextImpl.java   |    12 +
 .../server/resourcemanager/RMServerUtils.java   |    21 +-
 .../yarn/server/resourcemanager/RMZKUtils.java  |    19 +-
 .../server/resourcemanager/ResourceManager.java |    23 +-
 .../resourcemanager/amlauncher/AMLauncher.java  |    21 -
 .../metrics/TimelineServiceV1Publisher.java     |     6 +
 .../metrics/TimelineServiceV2Publisher.java     |     5 +
 .../recovery/ZKRMStateStore.java                |   269 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |    49 +-
 .../rmapp/monitor/RMAppLifetimeMonitor.java     |   130 +
 .../rmapp/monitor/RMAppToMonitor.java           |    77 +
 .../rmapp/monitor/package-info.java             |    28 +
 .../scheduler/AbstractYarnScheduler.java        |   416 +-
 .../scheduler/AppSchedulingInfo.java            |   619 +-
 .../scheduler/SchedulerApplicationAttempt.java  |    66 +-
 .../scheduler/capacity/CapacityScheduler.java   |  1729 +-
 .../scheduler/capacity/LeafQueue.java           |    16 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |     4 +
 .../distributed/NodeQueueLoadMonitor.java       |    45 +-
 .../scheduler/fair/FSLeafQueue.java             |     2 +-
 .../scheduler/fair/FSParentQueue.java           |     6 +-
 .../scheduler/fair/FairScheduler.java           |  1047 +-
 .../resourcemanager/webapp/ErrorBlock.java      |    39 +
 .../server/resourcemanager/webapp/RMWebApp.java |     1 +
 .../resourcemanager/webapp/RMWebServices.java   |    24 +-
 .../webapp/RedirectionErrorPage.java            |    47 +
 .../resourcemanager/webapp/RmController.java    |     4 +
 .../resourcemanager/webapp/dao/AppState.java    |     8 +
 .../webapp/dao/ReservationDefinitionInfo.java   |    11 +
 .../yarn/server/resourcemanager/MockRM.java     |    22 +-
 .../server/resourcemanager/NodeManager.java     |    29 +
 .../resourcemanager/TestAMAuthorization.java    |    31 +
 .../TestApplicationMasterLauncher.java          |    30 +
 .../resourcemanager/TestClientRMService.java    |     7 +-
 ...pportunisticContainerAllocatorAMService.java |    10 +-
 .../resourcemanager/TestRMAdminService.java     |   103 +-
 .../metrics/TestSystemMetricsPublisher.java     |    18 +
 .../TestSystemMetricsPublisherForV2.java        |     9 +
 .../reservation/ReservationSystemTestUtil.java  |    10 +-
 .../rmapp/TestApplicationLifetimeMonitor.java   |   165 +
 .../TestRMWebServicesAppsModification.java      |     4 +
 .../webapp/TestRedirectionErrorPage.java        |    68 +
 .../yarn/server/webproxy/ProxyUriUtils.java     |    53 +-
 .../server/webproxy/WebAppProxyServlet.java     |   274 +-
 .../server/webproxy/amfilter/AmIpFilter.java    |    64 +-
 .../server/webproxy/TestWebAppProxyServlet.java |    24 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |    29 +-
 .../src/site/markdown/CapacityScheduler.md      |     2 +-
 .../src/site/markdown/NodeLabel.md              |     2 +-
 .../src/site/markdown/ReservationSystem.md      |     2 +-
 .../src/site/markdown/ResourceManagerRest.md    |     3 +
 .../site/markdown/WritingYarnApplications.md    |     6 +-
 .../hadoop-yarn-site/src/site/markdown/YARN.md  |     6 +-
 hadoop-yarn-project/hadoop-yarn/pom.xml         |     4 +-
 pom.xml                                         |     1 -
 407 files changed, 32657 insertions(+), 72369 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a57bba47/hadoop-project/pom.xml
----------------------------------------------------------------------




[10/51] [abbrv] hadoop git commit: HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete "/". Contributed by Genmao Yu

Posted by ae...@apache.org.
HADOOP-13701. AbstractContractRootDirectoryTest can fail when handling delete "/". Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c31b5e61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c31b5e61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c31b5e61

Branch: refs/heads/HDFS-7240
Commit: c31b5e61b1f09949548116309218a2b3e9c0beda
Parents: a57bba4
Author: Kai Zheng <ka...@intel.com>
Authored: Sat Oct 8 17:35:59 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sat Oct 8 17:35:59 2016 +0600

----------------------------------------------------------------------
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      | 39 +++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31b5e61/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 81e038d..3b266c8 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 
@@ -53,6 +54,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(AliyunOSSFileSystem.class);
   private URI uri;
+  private String bucket;
   private Path workingDir;
   private AliyunOSSFileSystemStore store;
   private int maxKeys;
@@ -124,11 +126,20 @@ public class AliyunOSSFileSystem extends FileSystem {
   private boolean innerDelete(FileStatus status, boolean recursive)
       throws IOException {
     Path f = status.getPath();
+    String p = f.toUri().getPath();
+    FileStatus[] statuses;
+    // indicating root directory "/".
+    if (p.equals("/")) {
+      statuses = listStatus(status.getPath());
+      boolean isEmptyDir = statuses.length <= 0;
+      return rejectRootDirectoryDelete(isEmptyDir, recursive);
+    }
+
     String key = pathToKey(f);
     if (status.isDirectory()) {
       if (!recursive) {
-        FileStatus[] statuses = listStatus(status.getPath());
         // Check whether it is an empty directory or not
+        statuses = listStatus(status.getPath());
         if (statuses.length > 0) {
           throw new IOException("Cannot remove directory " + f +
               ": It is not empty!");
@@ -148,6 +159,31 @@ public class AliyunOSSFileSystem extends FileSystem {
     return true;
   }
 
+  /**
+   * Implements the specific logic to reject root directory deletion.
+   * The caller must return the result of this call, rather than
+   * attempt to continue with the delete operation: deleting root
+   * directories is never allowed. This method simply implements
+   * the policy of when to return an exit code versus raise an exception.
+   * @param isEmptyDir empty directory or not
+   * @param recursive recursive flag from command
+   * @return a return code for the operation
+   * @throws PathIOException if the operation was explicitly rejected.
+   */
+  private boolean rejectRootDirectoryDelete(boolean isEmptyDir,
+      boolean recursive) throws IOException {
+    LOG.info("oss delete the {} root directory of {}", bucket, recursive);
+    if (isEmptyDir) {
+      return true;
+    }
+    if (recursive) {
+      return false;
+    } else {
+      // reject
+      throw new PathIOException(bucket, "Cannot delete root path");
+    }
+  }
+
   private void createFakeDirectoryIfNecessary(Path f) throws IOException {
     String key = pathToKey(f);
     if (StringUtils.isNotEmpty(key) && !exists(f)) {
@@ -226,6 +262,7 @@ public class AliyunOSSFileSystem extends FileSystem {
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
 
+    bucket = name.getHost();
     uri = java.net.URI.create(name.getScheme() + "://" + name.getAuthority());
     workingDir = new Path("/user",
         System.getProperty("user.name")).makeQualified(uri, null);
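
Seen from the FileSystem API, the policy added in rejectRootDirectoryDelete boils down to three outcomes for delete("/", ...): success for an empty root, false for a recursive delete of a non-empty root, and a PathIOException for a non-recursive delete of a non-empty root. The sketch below walks through that contract against an assumed oss:// URI; it is an illustration, not a test from this patch.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIOException;

// Hedged sketch of the root-delete contract enforced by rejectRootDirectoryDelete;
// the bucket URI and Configuration below are assumptions for illustration.
public class RootDeleteContractSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("oss://example-bucket/"), conf);
    Path root = new Path("/");

    // Empty root, either recursive flag: delete() returns true (a no-op success).
    // Non-empty root, recursive=true: delete() returns false, nothing is removed.
    boolean outcome = fs.delete(root, true);
    System.out.println("delete(/, recursive=true) -> " + outcome);

    try {
      // Non-empty root, recursive=false: rejected with a PathIOException.
      fs.delete(root, false);
    } catch (PathIOException e) {
      System.out.println("non-recursive root delete rejected: " + e.getMessage());
    }
  }
}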




[15/51] [abbrv] hadoop git commit: HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen

Posted by ae...@apache.org.
HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af50da32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af50da32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af50da32

Branch: refs/heads/HDFS-7240
Commit: af50da3298f92a52cc20d5f6aab6f6ad8134efbd
Parents: 3d59b18
Author: Kai Zheng <ka...@intel.com>
Authored: Mon Oct 10 11:55:49 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Mon Oct 10 11:55:49 2016 +0600

----------------------------------------------------------------------
 .../src/site/markdown/HDFSErasureCoding.md           | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af50da32/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 18b3a25..627260f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -22,6 +22,7 @@ HDFS Erasure Coding
     * [Deployment](#Deployment)
         * [Cluster and hardware configuration](#Cluster_and_hardware_configuration)
         * [Configuration keys](#Configuration_keys)
+        * [Enable Intel ISA-L](#Enable_Intel_ISA-L)
         * [Administrative commands](#Administrative_commands)
 
 Purpose
@@ -73,6 +74,9 @@ Architecture
 
     There are three policies currently being supported: RS-DEFAULT-3-2-64k, RS-DEFAULT-6-3-64k and RS-LEGACY-6-3-64k, all with a default cell size of 64KB. The system default policy is RS-DEFAULT-6-3-64k, which uses the default schema RS_6_3_SCHEMA with a cell size of 64KB.
 
+ *  **Intel ISA-L**
+    Intel ISA-L stands for Intel Intelligent Storage Acceleration Library. ISA-L is a collection of optimized low-level functions used primarily in storage applications. It includes fast block Reed-Solomon erasure codes optimized for the Intel AVX and AVX2 instruction sets.
+    HDFS EC can leverage this open-source library to accelerate encoding and decoding calculations. ISA-L supports most major operating systems, including Linux and Windows. By default, ISA-L is not enabled in HDFS.
 
 Deployment
 ----------
@@ -98,7 +102,7 @@ Deployment
   `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
-  The default implementations for all of these codecs are pure Java.
+  The default implementations for all of these codecs are pure Java. For the default RS codec, there is also a native implementation which leverages the Intel ISA-L library to improve encoding and decoding performance. Please refer to the section "Enable Intel ISA-L" for more detailed information.
 
   Erasure coding background recovery work on the DataNodes can also be tuned via the following configuration parameters:
 
@@ -106,6 +110,15 @@ Deployment
   1. `dfs.datanode.stripedread.threads` - Number of concurrent reader threads. Default value is 20 threads.
   1. `dfs.datanode.stripedread.buffer.size` - Buffer size for reader service. Default value is 256KB.
 
+### Enable Intel ISA-L
+
+  The HDFS native implementation of the default RS codec leverages the Intel ISA-L library to improve encoding and decoding performance. To enable and use Intel ISA-L, there are three steps.
+  1. Build the ISA-L library. Please refer to the official site "https://github.com/01org/isa-l/" for detailed information.
+  2. Build Hadoop with ISA-L support. Please refer to the "Intel ISA-L build options" section of the "Build instructions for Hadoop" (BUILDING.txt) document. Use -Dbundle.isal to copy the contents of the isal.lib directory into the final tar file, and deploy Hadoop with that tar file. Make sure the ISA-L library is available on both the HDFS client and the DataNodes.
+  3. Configure the `io.erasurecode.codec.rs-default.rawcoder` key with the value `org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on the HDFS client and DataNodes.
+
+  To check whether the ISA-L library is enabled, run the "hadoop checknative" command. It reports whether the ISA-L library is available or not.
+
 ### Administrative commands
 
   HDFS provides an `erasurecode` subcommand to perform administrative commands related to erasure coding.

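As a quick illustration of step 3 above, the ISA-L backed coder can also be selected programmatically on the client; a minimal sketch (the key and factory class come from the patch, everything else is assumed):

import org.apache.hadoop.conf.Configuration;

// Sketch: select the native (ISA-L backed) implementation for the default RS codec.
public class EnableIsalCoder {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("io.erasurecode.codec.rs-default.rawcoder",
        "org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory");
    // The same key/value pair can equally be set in the cluster configuration files.
    System.out.println(conf.get("io.erasurecode.codec.rs-default.rawcoder"));
  }
}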


[33/51] [abbrv] hadoop git commit: HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang

Posted by ae...@apache.org.
HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c9a0106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c9a0106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c9a0106

Branch: refs/heads/HDFS-7240
Commit: 3c9a01062e9097c2ed1db75318482543db2e382f
Parents: 61f0490
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Oct 11 16:29:30 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Oct 11 16:29:30 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/http/server/FSOperations.java | 9 +++++++--
 .../hadoop/lib/service/hadoop/FileSystemAccessService.java  | 6 ++++--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml         | 8 ++++++++
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java   | 3 ++-
 .../hdfs/server/blockmanagement/TestBlockTokenWithDFS.java  | 3 ++-
 6 files changed, 26 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 46948f9..001bc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -48,6 +48,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
+
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
  */
@@ -462,7 +465,8 @@ public class FSOperations {
         blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = new FsPermission(permission);
-      int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+          HTTP_BUFFER_SIZE_DEFAULT);
       OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
       IOUtils.copyBytes(is, os, bufferSize, true);
       os.close();
@@ -752,7 +756,8 @@ public class FSOperations {
      */
     @Override
     public InputStream execute(FileSystem fs) throws IOException {
-      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+          HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
       return fs.open(path, bufferSize);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 0b767be..61d3b45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -50,6 +50,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
 @InterfaceAudience.Private
 public class FileSystemAccessService extends BaseService implements FileSystemAccess {
   private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
         throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
       }
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "kerberos");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
       UserGroupInformation.setConfiguration(conf);
       try {
         UserGroupInformation.loginUserFromKeytab(principal, keytab);
@@ -169,7 +171,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
       LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
     } else if (security.equals("simple")) {
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "simple");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
       UserGroupInformation.setConfiguration(conf);
       LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df45e2a..18209ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -992,6 +992,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.disk.balancer.plan.threshold.percent";
   public static final int DFS_DISK_BALANCER_PLAN_THRESHOLD_DEFAULT = 10;
 
+  public static final String HTTPFS_BUFFER_SIZE_KEY =
+      "httpfs.buffer.size";
+  public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
 
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 672b597..db4035d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4273,4 +4273,12 @@
       consecutive warnings within this interval.</description>
   </property>
 
+  <property>
+    <name>httpfs.buffer.size</name>
+    <value>4096</value>
+    <description>
+      The size of the buffer to be used when creating or opening HttpFS filesystem IO streams.
+    </description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 4147851..ae0f0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -89,7 +90,7 @@ public class TestFileAppend4 {
     
     // handle failures in the DFSClient pipeline quickly
     // (for cluster.shutdown(); fs.close() idiom)
-    conf.setInt("ipc.client.connect.max.retries", 1);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
   }
   
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 9374ae8..5a8a39a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -218,7 +219,7 @@ public class TestBlockTokenWithDFS {
     conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
-    conf.setInt("ipc.client.connect.max.retries", 0);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;

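The refactoring pattern applied throughout this patch, as a small sketch (the key class below is hypothetical, not one added by the change): declare the literal and its default once as constants, then reference the constants everywhere instead of repeating the string and the magic number.

// Hypothetical example of the pattern; MyConfigKeys is not a real Hadoop class.
public final class MyConfigKeys {
  public static final String HTTPFS_BUFFER_SIZE_KEY = "httpfs.buffer.size";
  public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;

  private MyConfigKeys() {
  }
}

// Call sites then read:
//   conf.getInt(MyConfigKeys.HTTPFS_BUFFER_SIZE_KEY, MyConfigKeys.HTTP_BUFFER_SIZE_DEFAULT)
// instead of:
//   conf.getInt("httpfs.buffer.size", 4096)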


[26/51] [abbrv] hadoop git commit: YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan

Posted by ae...@apache.org.
YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecb51b85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecb51b85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecb51b85

Branch: refs/heads/HDFS-7240
Commit: ecb51b857ac7faceff981b2b6f22ea1af0d42ab1
Parents: 96b1266
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Oct 11 15:12:43 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Oct 11 15:12:43 2016 +0000

----------------------------------------------------------------------
 .../yarn/util/ProcfsBasedProcessTree.java       | 26 ++++++-----
 .../yarn/util/TestProcfsBasedProcessTree.java   | 46 ++++++++++----------
 2 files changed, 39 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 80d49c3..29bc277 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -406,15 +406,14 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
                 continue;
               }
 
-              total +=
-                  Math.min(info.sharedDirty, info.pss) + info.privateDirty
-                      + info.privateClean;
+              // Account for anonymous to know the amount of
+              // memory reclaimable by killing the process
+              total += info.anonymous;
+
               if (LOG.isDebugEnabled()) {
                 LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-                    + ", SharedDirty : " + info.sharedDirty + ", PSS : "
-                    + info.pss + ", Private_Dirty : " + info.privateDirty
-                    + ", Private_Clean : " + info.privateClean + ", total : "
-                    + (total * KB_TO_BYTES));
+                    + ", info : " + info.toString()
+                    + ", total : " + (total * KB_TO_BYTES));
               }
             }
           }
@@ -877,6 +876,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     private int sharedDirty;
     private int privateClean;
     private int privateDirty;
+    private int anonymous;
     private int referenced;
     private String regionName;
     private String permission;
@@ -929,6 +929,10 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       return referenced;
     }
 
+    public int getAnonymous() {
+      return anonymous;
+    }
+
     public void setMemInfo(String key, String value) {
       MemInfo info = MemInfo.getMemInfoByName(key);
       int val = 0;
@@ -969,6 +973,9 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       case REFERENCED:
         referenced = val;
         break;
+      case ANONYMOUS:
+        anonymous = val;
+        break;
       default:
         break;
       }
@@ -999,10 +1006,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         .append(MemInfo.REFERENCED.name + ":" + this.getReferenced())
         .append(" kB\n");
       sb.append("\t")
-        .append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
-        .append(" kB\n");
-      sb.append("\t")
-        .append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
+        .append(MemInfo.ANONYMOUS.name + ":" + this.getAnonymous())
         .append(" kB\n");
       return sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index fa4e8c8..841d333 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -369,21 +369,24 @@ public class TestProcfsBasedProcessTree {
       List<ProcessSmapMemoryInfo> memoryMappingList =
           procMemInfo[i].getMemoryInfoList();
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7f56c177c000-7f56c177d000 "
+          "7f56c177c000-7f56c177d000 "
             + "rw-p 00010000 08:02 40371558                   "
             + "/grid/0/jdk1.7.0_25/jre/lib/amd64/libnio.so",
-        new String[] { "4", "4", "25", "4", "25", "15", "10", "4", "0", "0",
-            "0", "4", "4" }));
+            // Format: size, rss, pss, shared_clean, shared_dirty, private_clean
+            // private_dirty, referenced, anon, anon-huge-pages, swap,
+            // kernel_page_size, mmu_page_size
+            new String[] {"4", "4", "25", "4", "25", "15", "10", "4", "10", "0",
+                "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
-        new String[] { "4", "4", "25", "4", "0", "15", "10", "4", "0", "0",
-            "0", "4", "4" }));
+          "7fb09382e000-7fb09382f000 r--s 00003000 " + "08:02 25953545",
+          new String[] {"4", "4", "25", "4", "0", "15", "10", "4", "10", "0",
+              "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] { "4", "4",
-            "25", "4", "0", "15", "10", "4", "0", "0", "0", "4", "4" }));
+          "7e8790000-7e8b80000 r-xs 00000000 00:00 0", new String[] {"4", "4",
+              "25", "4", "0", "15", "10", "4", "10", "0", "0", "4", "4"}));
       memoryMappingList.add(constructMemoryMappingInfo(
-        "7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] { "4", "4",
-            "25", "4", "50", "15", "10", "4", "0", "0", "0", "4", "4" }));
+          "7da677000-7e0dcf000 rw-p 00000000 00:00 0", new String[] {"4", "4",
+              "25", "4", "50", "15", "10", "4", "10", "0", "0", "4", "4"}));
     }
   }
 
@@ -471,13 +474,12 @@ public class TestProcfsBasedProcessTree {
 
       // Check by enabling smaps
       setSmapsInProceTree(processTree, true);
-      // RSS=Min(shared_dirty,PSS)+PrivateClean+PrivateDirty (exclude r-xs,
-      // r--s)
+      // anon (exclude r-xs,r--s)
       Assert.assertEquals("rss memory does not match",
-        (100 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
+          (20 * KB_TO_BYTES * 3), processTree.getRssMemorySize());
       // verify old API
       Assert.assertEquals("rss memory (old API) does not match",
-        (100 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
+          (20 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
 
       // test the cpu time again to see if it cumulates
       procInfos[0] =
@@ -621,10 +623,10 @@ public class TestProcfsBasedProcessTree {
           cumuRssMem, processTree.getCumulativeRssmem());
       } else {
         Assert.assertEquals("rssmem does not include new process",
-          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
+            20 * KB_TO_BYTES * 4, processTree.getRssMemorySize());
         // verify old API
         Assert.assertEquals("rssmem (old API) does not include new process",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
+            20 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem());
       }
 
       // however processes older than 1 iteration will retain the older value
@@ -650,11 +652,11 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new process",
-          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
+            20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(1));
         // Verify old API
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new process",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
+            20 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(1));
       }
 
       // one more process
@@ -696,11 +698,11 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
+            20 * KB_TO_BYTES * 3, processTree.getRssMemorySize(2));
         // Verify old API
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new processes",
-          100 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
+            20 * KB_TO_BYTES * 3, processTree.getCumulativeRssmem(2));
       }
 
       // processes older than 1 iteration should not include new process,
@@ -727,10 +729,10 @@ public class TestProcfsBasedProcessTree {
       } else {
         Assert.assertEquals(
           "rssmem shouldn't have included new processes",
-          100 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
+            20 * KB_TO_BYTES * 4, processTree.getRssMemorySize(1));
         Assert.assertEquals(
           "rssmem (old API) shouldn't have included new processes",
-          100 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
+            20 * KB_TO_BYTES * 4, processTree.getCumulativeRssmem(1));
       }
 
       // no processes older than 3 iterations

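In short, the per-region accounting changes from min(shared_dirty, pss) + private_dirty + private_clean to just the anonymous pages, since anonymous memory is what would actually be reclaimed by killing the process. A sketch of the two formulas (field names follow /proc/<pid>/smaps; the class is illustrative only):

// Illustrative only: old versus new per-region memory accounting.
class SmapsRegionSketch {
  int pss, sharedDirty, privateClean, privateDirty, anonymous; // all in kB

  long oldAccounting() {
    // Before the patch: the estimate includes file-backed private dirty/clean pages.
    return Math.min(sharedDirty, pss) + privateDirty + privateClean;
  }

  long newAccounting() {
    // After the patch: only anonymous pages, i.e. memory freed if the process dies.
    return anonymous;
  }
}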


[14/51] [abbrv] hadoop git commit: HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce indentation. Contributed by Huafeng Wang

Posted by ae...@apache.org.
HADOOP-13641. Update UGI#spawnAutoRenewalThreadForUserCreds to reduce indentation. Contributed by Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d59b18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d59b18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d59b18d

Branch: refs/heads/HDFS-7240
Commit: 3d59b18d49d98a293ae14c5b89d515ef83cc4ff7
Parents: bea004e
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:53:36 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:53:36 2016 +0600

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   | 98 ++++++++++----------
 1 file changed, 49 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d59b18d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 329859d..e8711b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -946,60 +946,60 @@ public class UserGroupInformation {
 
   /**Spawn a thread to do periodic renewals of kerberos credentials*/
   private void spawnAutoRenewalThreadForUserCreds() {
-    if (isSecurityEnabled()) {
-      //spawn thread only if we have kerb credentials
-      if (user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS &&
-          !isKeytab) {
-        Thread t = new Thread(new Runnable() {
-          
-          @Override
-          public void run() {
-            String cmd = conf.get("hadoop.kerberos.kinit.command",
-                                  "kinit");
-            KerberosTicket tgt = getTGT();
+    if (!isSecurityEnabled()
+        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
+        || isKeytab) {
+      return;
+    }
+
+    //spawn thread only if we have kerb credentials
+    Thread t = new Thread(new Runnable() {
+
+      @Override
+      public void run() {
+        String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+        KerberosTicket tgt = getTGT();
+        if (tgt == null) {
+          return;
+        }
+        long nextRefresh = getRefreshTime(tgt);
+        while (true) {
+          try {
+            long now = Time.now();
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Current time is " + now);
+              LOG.debug("Next refresh is " + nextRefresh);
+            }
+            if (now < nextRefresh) {
+              Thread.sleep(nextRefresh - now);
+            }
+            Shell.execCommand(cmd, "-R");
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("renewed ticket");
+            }
+            reloginFromTicketCache();
+            tgt = getTGT();
             if (tgt == null) {
+              LOG.warn("No TGT after renewal. Aborting renew thread for " +
+                  getUserName());
               return;
             }
-            long nextRefresh = getRefreshTime(tgt);
-            while (true) {
-              try {
-                long now = Time.now();
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("Current time is " + now);
-                  LOG.debug("Next refresh is " + nextRefresh);
-                }
-                if (now < nextRefresh) {
-                  Thread.sleep(nextRefresh - now);
-                }
-                Shell.execCommand(cmd, "-R");
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("renewed ticket");
-                }
-                reloginFromTicketCache();
-                tgt = getTGT();
-                if (tgt == null) {
-                  LOG.warn("No TGT after renewal. Aborting renew thread for " +
-                           getUserName());
-                  return;
-                }
-                nextRefresh = Math.max(getRefreshTime(tgt),
-                                       now + kerberosMinSecondsBeforeRelogin);
-              } catch (InterruptedException ie) {
-                LOG.warn("Terminating renewal thread");
-                return;
-              } catch (IOException ie) {
-                LOG.warn("Exception encountered while running the" +
-                    " renewal command. Aborting renew thread. " + ie);
-                return;
-              }
-            }
+            nextRefresh = Math.max(getRefreshTime(tgt),
+              now + kerberosMinSecondsBeforeRelogin);
+          } catch (InterruptedException ie) {
+            LOG.warn("Terminating renewal thread");
+            return;
+          } catch (IOException ie) {
+            LOG.warn("Exception encountered while running the" +
+                " renewal command. Aborting renew thread. " + ie);
+            return;
           }
-        });
-        t.setDaemon(true);
-        t.setName("TGT Renewer for " + getUserName());
-        t.start();
+        }
       }
-    }
+    });
+    t.setDaemon(true);
+    t.setName("TGT Renewer for " + getUserName());
+    t.start();
   }
   /**
    * Log a user in from a keytab file. Loads a user identity from a keytab

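The structural change above is a guard-clause refactoring: the disqualifying conditions return early, so the renewal logic no longer sits several levels deep. A minimal sketch of the pattern (names here are illustrative, not Hadoop APIs):

// Illustrative sketch of the guard-clause refactoring.
class RenewalSpawnerSketch {
  boolean securityEnabled, hasKerberosCreds, isKeytab;

  void spawnNested() {
    // Before: the useful work is buried inside nested conditionals.
    if (securityEnabled) {
      if (hasKerberosCreds && !isKeytab) {
        startRenewalThread();
      }
    }
  }

  void spawnWithGuards() {
    // After: disqualifying cases return early; the useful work sits at the top level.
    if (!securityEnabled || !hasKerberosCreds || isKeytab) {
      return;
    }
    startRenewalThread();
  }

  void startRenewalThread() {
    // omitted
  }
}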


[18/51] [abbrv] hadoop git commit: HADOOP-13699. Configuration does not substitute multiple references to the same var.

Posted by ae...@apache.org.
HADOOP-13699. Configuration does not substitute multiple references to the same var.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03060075
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03060075
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03060075

Branch: refs/heads/HDFS-7240
Commit: 03060075c53a2cecfbf5f60b6fc77afecf64ace5
Parents: 3441c74
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Oct 10 12:19:26 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Oct 10 12:19:26 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java   | 23 ++++++++++---------
 .../apache/hadoop/conf/TestConfiguration.java   | 24 ++++----------------
 2 files changed, 16 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 1e8ed50..dbbc8ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -943,10 +943,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *
    * If var is unbounded the current state of expansion "prefix${var}suffix" is
    * returned.
-   *
-   * If a cycle is detected: replacing var1 requires replacing var2 ... requires
-   * replacing var1, i.e., the cycle is shorter than
-   * {@link Configuration#MAX_SUBST} then the original expr is returned.
+   * <p>
+   * This function also detects self-referential substitutions, i.e.
+   * <pre>
+   *   {@code
+   *   foo.bar = ${foo.bar}
+   *   }
+   * </pre>
+   * If a cycle is detected then the original expr is returned. Loops
+   * involving multiple substitutions are not detected.
    *
    * @param expr the literal value of a config key
    * @return null if expr is null, otherwise the value resulting from expanding
@@ -959,7 +964,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return null;
     }
     String eval = expr;
-    Set<String> evalSet = null;
     for(int s = 0; s < MAX_SUBST; s++) {
       final int[] varBounds = findSubVariable(eval);
       if (varBounds[SUB_START_IDX] == -1) {
@@ -1004,15 +1008,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         return eval; // return literal ${var}: var is unbound
       }
 
-      // prevent recursive resolution
-      //
       final int dollar = varBounds[SUB_START_IDX] - "${".length();
       final int afterRightBrace = varBounds[SUB_END_IDX] + "}".length();
       final String refVar = eval.substring(dollar, afterRightBrace);
-      if (evalSet == null) {
-        evalSet = new HashSet<String>();
-      }
-      if (!evalSet.add(refVar)) {
+
+      // detect self-referential values
+      if (val.contains(refVar)) {
         return expr; // return original expression if there is a loop
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03060075/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 917ccbc..17112f5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -168,6 +168,9 @@ public class TestConfiguration extends TestCase {
     declareProperty("my.fullfile", "${my.base}/${my.file}${my.suffix}", "/tmp/hadoop_user/hello.txt");
     // check that undefined variables are returned as-is
     declareProperty("my.failsexpand", "a${my.undefvar}b", "a${my.undefvar}b");
+    // check that multiple variable references are resolved
+    declareProperty("my.user.group", "${user.name} ${user.name}",
+        "hadoop_user hadoop_user");
     endConfig();
     Path fileResource = new Path(CONFIG);
     mock.addResource(fileResource);
@@ -1508,7 +1511,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
-  public void testInvalidSubstitutation() {
+  public void testInvalidSubstitution() {
     final Configuration configuration = new Configuration(false);
 
     // 2-var loops
@@ -1522,25 +1525,6 @@ public class TestConfiguration extends TestCase {
       configuration.set(key, keyExpression);
       assertEquals("Unexpected value", keyExpression, configuration.get(key));
     }
-
-    //
-    // 3-variable loops
-    //
-
-    final String expVal1 = "${test.var2}";
-    String testVar1 = "test.var1";
-    configuration.set(testVar1, expVal1);
-    configuration.set("test.var2", "${test.var3}");
-    configuration.set("test.var3", "${test.var1}");
-    assertEquals("Unexpected value", expVal1, configuration.get(testVar1));
-
-    // 3-variable loop with non-empty value prefix/suffix
-    //
-    final String expVal2 = "foo2${test.var2}bar2";
-    configuration.set(testVar1, expVal2);
-    configuration.set("test.var2", "foo3${test.var3}bar3");
-    configuration.set("test.var3", "foo1${test.var1}bar1");
-    assertEquals("Unexpected value", expVal2, configuration.get(testVar1));
   }
 
   public void testIncompleteSubbing() {

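The user-visible effect, sketched below under the assumption of a plain Configuration with no resources loaded: a value that references the same variable twice now has both occurrences expanded (here from the user.name system property), while a directly self-referential value is still returned as its original literal.

import org.apache.hadoop.conf.Configuration;

// Sketch of the substitution behavior after this change.
public class SubstitutionDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);

    conf.set("demo.user.twice", "${user.name} ${user.name}");
    // Both references are expanded, e.g. "alice alice".
    System.out.println(conf.get("demo.user.twice"));

    conf.set("demo.self", "${demo.self}");
    // Self-referential values come back unexpanded: "${demo.self}".
    System.out.println(conf.get("demo.self"));
  }
}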


[03/51] [abbrv] hadoop git commit: HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars to prevent classpath conflicts. Contributed by Chris Nauroth.

Posted by ae...@apache.org.
HADOOP-13692. hadoop-aws should declare explicit dependency on Jackson 2 jars to prevent classpath conflicts. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69620f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69620f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69620f95

Branch: refs/heads/HDFS-7240
Commit: 69620f955997250d1b543d86d4907ee50218152a
Parents: 3059b25
Author: Chris Nauroth <cn...@apache.org>
Authored: Fri Oct 7 11:41:19 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Fri Oct 7 11:41:19 2016 -0700

----------------------------------------------------------------------
 hadoop-tools/hadoop-aws/pom.xml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69620f95/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 49b0379..1c1bb02 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -286,6 +286,18 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
+    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
     </dependency>

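A quick way to confirm which Jackson 2 build actually wins on the classpath after this change (a sketch; running it against the hadoop-aws runtime classpath is left to your build):

import com.fasterxml.jackson.databind.ObjectMapper;

// Prints the resolved jackson-databind version; useful when hunting classpath conflicts.
public class JacksonVersionCheck {
  public static void main(String[] args) {
    System.out.println(new ObjectMapper().version());
  }
}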


[30/51] [abbrv] hadoop git commit: HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.

Posted by ae...@apache.org.
HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a09bf7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a09bf7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a09bf7c

Branch: refs/heads/HDFS-7240
Commit: 8a09bf7c19d9d2f6d6853d45e11b0d38c7c67f2a
Parents: 4b32b14
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Oct 11 13:46:07 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Oct 11 13:46:07 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/TrashPolicy.java  | 30 ++++++++++++++++++++
 .../apache/hadoop/fs/TrashPolicyDefault.java    | 15 ++++++++++
 .../java/org/apache/hadoop/fs/TestTrash.java    |  4 +++
 3 files changed, 49 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index bd99db4..157b9ab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -38,6 +38,17 @@ public abstract class TrashPolicy extends Configured {
 
   /**
    * Used to setup the trash policy. Must be implemented by all TrashPolicy
+   * implementations.
+   * @param conf the configuration to be used
+   * @param fs the filesystem to be used
+   * @param home the home directory
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public abstract void initialize(Configuration conf, FileSystem fs, Path home);
+
+  /**
+   * Used to setup the trash policy. Must be implemented by all TrashPolicy
    * implementations. Different from initialize(conf, fs, home), this one does
    * not assume trash always under /user/$USER due to HDFS encryption zone.
    * @param conf the configuration to be used
@@ -105,6 +116,25 @@ public abstract class TrashPolicy extends Configured {
    *
    * @param conf the configuration to be used
    * @param fs the file system to be used
+   * @param home the home directory
+   * @return an instance of TrashPolicy
+   * @deprecated Use {@link #getInstance(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
+    Class<? extends TrashPolicy> trashClass = conf.getClass(
+        "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+    TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
+    trash.initialize(conf, fs, home); // initialize TrashPolicy
+    return trash;
+  }
+
+  /**
+   * Get an instance of the configured TrashPolicy based on the value
+   * of the configuration parameter fs.trash.classname.
+   *
+   * @param conf the configuration to be used
+   * @param fs the file system to be used
    * @return an instance of TrashPolicy
    */
   public static TrashPolicy getInstance(Configuration conf, FileSystem fs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index f4a825c..72222be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -75,6 +75,21 @@ public class TrashPolicyDefault extends TrashPolicy {
     initialize(conf, fs);
   }
 
+  /**
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Override
+  @Deprecated
+  public void initialize(Configuration conf, FileSystem fs, Path home) {
+    this.fs = fs;
+    this.deletionInterval = (long)(conf.getFloat(
+        FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+    this.emptierInterval = (long)(conf.getFloat(
+        FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+        * MSECS_PER_MINUTE);
+   }
+
   @Override
   public void initialize(Configuration conf, FileSystem fs) {
     this.fs = fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 2aba01f..338aff6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -692,6 +692,10 @@ public class TestTrash extends TestCase {
     public TestTrashPolicy() { }
 
     @Override
+    public void initialize(Configuration conf, FileSystem fs, Path home) {
+    }
+
+    @Override
     public void initialize(Configuration conf, FileSystem fs) {
     }
 

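For callers, the net effect is that the three-argument entry points are back but deprecated. A sketch of the restored overload next to the preferred one (the home path is an illustrative assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrashPolicy;

// Sketch only: the deprecated overload is restored for compatibility.
public class TrashPolicyUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Preferred: the trash location is resolved per path (encryption-zone aware).
    TrashPolicy current = TrashPolicy.getInstance(conf, fs);

    // Restored but deprecated: trash location fixed under the given home directory.
    TrashPolicy legacy = TrashPolicy.getInstance(conf, fs, new Path("/user/example"));

    System.out.println(current.getClass() + " / " + legacy.getClass());
  }
}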


[06/51] [abbrv] hadoop git commit: MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szegedi@cloudera.com via rkanter)

Posted by ae...@apache.org.
MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szegedi@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3f37e6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3f37e6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3f37e6f

Branch: refs/heads/HDFS-7240
Commit: f3f37e6fb8172f6434e06eb9a137c0c155b3952e
Parents: 2e853be
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Oct 7 14:47:06 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Oct 7 14:47:06 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  2 +-
 .../src/main/resources/mapred-default.xml       | 10 +++---
 .../apache/hadoop/mapred/JobClientUnitTest.java | 34 ++++++++++++++++----
 3 files changed, 34 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 5716404..1325b74 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -505,7 +505,7 @@ public interface MRJobConfig {
    */
   public static final String MR_CLIENT_JOB_MAX_RETRIES =
       MR_PREFIX + "client.job.max-retries";
-  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
+  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 3;
 
   /**
    * How long to wait between jobclient retries on failure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 73aaa7a..fe29212 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1505,12 +1505,12 @@
 
 <property>
   <name>yarn.app.mapreduce.client.job.max-retries</name>
-  <value>0</value>
+  <value>3</value>
   <description>The number of retries the client will make for getJob and
-  dependent calls.  The default is 0 as this is generally only needed for
-  non-HDFS DFS where additional, high level retries are required to avoid
-  spurious failures during the getJob call.  30 is a good value for
-  WASB</description>
+    dependent calls.
+    This is needed for non-HDFS DFS where additional, high-level
+    retries are required to avoid spurious failures during the getJob call.
+    30 is a good value for WASB.</description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index 4895a5b..e02232d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -225,10 +225,10 @@ public class JobClientUnitTest {
 
     //To prevent the test from running for a very long time, lower the retry
     JobConf conf = new JobConf();
-    conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
+    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 2);
 
     TestJobClientGetJob client = new TestJobClientGetJob(conf);
-    JobID id = new JobID("ajob",1);
+    JobID id = new JobID("ajob", 1);
     RunningJob rj = mock(RunningJob.class);
     client.setRunningJob(rj);
 
@@ -236,13 +236,35 @@ public class JobClientUnitTest {
     assertNotNull(client.getJob(id));
     assertEquals(client.getLastGetJobRetriesCounter(), 0);
 
-    //3 retry
+    //2 retries
+    client.setGetJobRetries(2);
+    assertNotNull(client.getJob(id));
+    assertEquals(client.getLastGetJobRetriesCounter(), 2);
+
+    //beyond yarn.app.mapreduce.client.job.max-retries, will get null
     client.setGetJobRetries(3);
+    assertNull(client.getJob(id));
+  }
+
+  @Test
+  public void testGetJobRetryDefault() throws Exception {
+
+    //To prevent the test from running for a very long time, lower the retry
+    JobConf conf = new JobConf();
+
+    TestJobClientGetJob client = new TestJobClientGetJob(conf);
+    JobID id = new JobID("ajob", 1);
+    RunningJob rj = mock(RunningJob.class);
+    client.setRunningJob(rj);
+
+    //3 retries (default)
+    client.setGetJobRetries(MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
     assertNotNull(client.getJob(id));
-    assertEquals(client.getLastGetJobRetriesCounter(), 3);
+    assertEquals(client.getLastGetJobRetriesCounter(),
+        MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
 
-    //beyond MAPREDUCE_JOBCLIENT_GETJOB_MAX_RETRY_KEY, will get null
-    client.setGetJobRetries(5);
+    //beyond yarn.app.mapreduce.client.job.max-retries, will get null
+    client.setGetJobRetries(MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES + 1);
     assertNull(client.getJob(id));
   }
 

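Clusters or tests that relied on the old fail-fast behavior can pin the retry count back to zero explicitly; a minimal sketch (the key constant is from the patch, the surrounding setup is assumed):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

// Sketch: restore the pre-change behavior of no getJob retries.
public class DisableJobRetries {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 0);
    System.out.println(conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, -1));
  }
}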


[21/51] [abbrv] hadoop git commit: HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep before assertions. Contributed by Mingliang Liu

Posted by ae...@apache.org.
HDFS-10985. o.a.h.ha.TestZKFailoverController should not use fixed time sleep before assertions. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c874fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c874fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c874fa91

Branch: refs/heads/HDFS-7240
Commit: c874fa914dfbf07d1731f5e87398607366675879
Parents: b963818
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Oct 7 17:03:08 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 13:33:07 2016 -0700

----------------------------------------------------------------------
 .../hadoop/ha/TestZKFailoverController.java     | 34 ++++++++++++--------
 1 file changed, 21 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c874fa91/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index 164167c..846c8ae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.security.NoSuchAlgorithmException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -441,12 +442,16 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
     cluster.waitForActiveLockHolder(0);
 
-    Thread.sleep(10000); // allow to quiesce
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return cluster.getService(0).fenceCount == 0 &&
+            cluster.getService(1).fenceCount == 0 &&
+            cluster.getService(0).activeTransitionCount == 2 &&
+            cluster.getService(1).activeTransitionCount == 1;
+      }
+    }, 100, 60 * 1000);
 
-    assertEquals(0, cluster.getService(0).fenceCount);
-    assertEquals(0, cluster.getService(1).fenceCount);
-    assertEquals(2, cluster.getService(0).activeTransitionCount);
-    assertEquals(1, cluster.getService(1).activeTransitionCount);
   }
 
   @Test
@@ -590,14 +595,17 @@ public class TestZKFailoverController extends ClientBaseWithFixes {
     cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
     cluster.waitForActiveLockHolder(0);
 
-    Thread.sleep(10000); // allow to quiesce
-
-    assertEquals(0, cluster.getService(0).fenceCount);
-    assertEquals(0, cluster.getService(1).fenceCount);
-    assertEquals(0, cluster.getService(2).fenceCount);
-    assertEquals(2, cluster.getService(0).activeTransitionCount);
-    assertEquals(1, cluster.getService(1).activeTransitionCount);
-    assertEquals(1, cluster.getService(2).activeTransitionCount);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return cluster.getService(0).fenceCount == 0 &&
+            cluster.getService(1).fenceCount == 0 &&
+            cluster.getService(2).fenceCount == 0 &&
+            cluster.getService(0).activeTransitionCount == 2 &&
+            cluster.getService(1).activeTransitionCount == 1 &&
+            cluster.getService(2).activeTransitionCount == 1;
+      }
+    }, 100, 60 * 1000);
   }
 
   private int runFC(DummyHAService target, String ... args) throws Exception {




[04/51] [abbrv] hadoop git commit: HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

Posted by ae...@apache.org.
HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.
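
The pattern applied here is resolve-once, pass-the-resolved-object: the path
String is resolved into an INodesInPath (IIP) at the entry point, and every
downstream method takes the IIP instead of re-resolving the String itself. A
toy, self-contained illustration of the same idea (the Resolved class and the
namespace map below are hypothetical stand-ins, not HDFS classes):

    import java.util.HashMap;
    import java.util.Map;

    public class ResolveOnceSketch {
      // Stand-in for INodesInPath: the result of resolving a path once.
      static final class Resolved {
        private final String normalizedPath;
        Resolved(String normalizedPath) { this.normalizedPath = normalizedPath; }
        String getPath() { return normalizedPath; }
      }

      static final Map<String, String> namespace = new HashMap<>();

      // Done exactly once, at the entry point.
      static Resolved resolve(String rawPath) {
        return new Resolved(rawPath.replaceAll("/+", "/"));
      }

      // Downstream methods take the resolved object, not the raw String.
      static boolean deleteAllowed(Resolved r) {
        return !"/".equals(r.getPath());
      }

      static void delete(Resolved r) {
        if (deleteAllowed(r)) {
          namespace.remove(r.getPath());
        }
      }

      public static void main(String[] args) {
        namespace.put("/tmp/file", "data");
        Resolved r = resolve("//tmp//file");  // resolve once
        delete(r);                            // pass the resolved object around
        System.out.println(namespace.containsKey("/tmp/file"));  // prints false
      }
    }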


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3565c9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3565c9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3565c9af

Branch: refs/heads/HDFS-7240
Commit: 3565c9af17ab05bf9e7f68b71b6c6850df772bb9
Parents: 69620f95
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Oct 7 14:14:47 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Oct 7 14:15:59 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirDeleteOp.java     | 63 ++++++++++----------
 .../hdfs/server/namenode/FSEditLogLoader.java   | 11 ++--
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ce..328ce79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
     FSNamesystem fsn = fsd.getFSNamesystem();
     fsd.writeLock();
     try {
-      if (deleteAllowed(iip, iip.getPath()) ) {
+      if (deleteAllowed(iip)) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
         FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-    src = iip.getPath();
-    if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
     }
+
+    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                           FsAction.ALL, true);
     }
-    if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, src);
+    if (fsd.isNonEmptyDirectory(iip)) {
+      if (!recursive) {
+        throw new PathIsNotEmptyDirectoryException(
+            iip.getPath() + " is non empty");
+      }
+      checkProtectedDescendants(fsd, iip);
     }
 
-    return deleteInternal(fsn, src, iip, logRetryCache);
+    return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
@@ -126,17 +130,14 @@ class FSDirDeleteOp {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    */
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
       throws IOException {
     assert fsd.hasWriteLock();
     FSNamesystem fsn = fsd.getFSNamesystem();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<Long> removedUCFiles = new ChunkedArrayList<>();
-
-    final INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), false);
-    if (!deleteAllowed(iip, src)) {
+    if (!deleteAllowed(iip)) {
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
    * <p>
    * For small directory or file the deletion is done in one shot.
    * @param fsn namespace
-   * @param src path name to be deleted
    * @param iip the INodesInPath instance containing all the INodes for the path
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
    * @throws IOException
    */
   static BlocksMapUpdateInfo deleteInternal(
-      FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+      FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     assert fsn.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -193,14 +189,14 @@ class FSDirDeleteOp {
     if (filesRemoved < 0) {
       return null;
     }
-    fsd.getEditLog().logDelete(src, mtime, logRetryCache);
+    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
     incrDeletedFileCount(filesRemoved);
 
     fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
 
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
-                                        + src +" is removed");
+      NameNode.stateChangeLog.debug(
+          "DIR* Namesystem.delete: " + iip.getPath() +" is removed");
     }
     return collectedBlocks;
   }
@@ -209,19 +205,18 @@ class FSDirDeleteOp {
     NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
 
-  private static boolean deleteAllowed(final INodesInPath iip,
-      final String src) {
+  private static boolean deleteAllowed(final INodesInPath iip) {
     if (iip.length() < 1 || iip.getLastINode() == null) {
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "DIR* FSDirectory.unprotectedDelete: failed to remove "
-                + src + " because it does not exist");
+                + iip.getPath() + " because it does not exist");
       }
       return false;
     } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn(
-          "DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
-              " because the root is not allowed to be deleted");
+          "DIR* FSDirectory.unprotectedDelete: failed to remove " +
+              iip.getPath() + " because the root is not allowed to be deleted");
       return false;
     }
     return true;
@@ -278,15 +273,19 @@ class FSDirDeleteOp {
    * Throw if the given directory has any non-empty protected descendants
    * (including itself).
    *
-   * @param src directory whose descendants are to be checked. The caller
-   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @param iip directory whose descendants are to be checked.
    * @throws AccessControlException if a non-empty protected descendant
    *                                was found.
    */
-  private static void checkProtectedDescendants(FSDirectory fsd, String src)
-      throws AccessControlException, UnresolvedLinkException {
+  private static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+          throws AccessControlException, UnresolvedLinkException {
     final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+    if (protectedDirs.isEmpty()) {
+      return;
+    }
 
+    String src = iip.getPath();
     // Is src protected? Caller has already checked it is non-empty.
     if (protectedDirs.contains(src)) {
       throw new AccessControlException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 09201cf..8abdba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -356,7 +356,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -565,10 +565,11 @@ public class FSEditLogLoader {
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      FSDirDeleteOp.deleteForEditLog(
-          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
-          deleteOp.timestamp);
-      
+      final String src = renameReservedPathsOnUpgrade(
+          deleteOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
+      FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
+
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 34fb8b6..0f4f14c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3805,7 +3805,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
           BlocksMapUpdateInfo toRemoveBlocks =
               FSDirDeleteOp.deleteInternal(
-                  FSNamesystem.this, bc.getName(),
+                  FSNamesystem.this,
                   INodesInPath.fromINode((INodeFile) bc), false);
           changed |= toRemoveBlocks != null;
           if (toRemoveBlocks != null) {




[24/51] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

Posted by ae...@apache.org.
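
One visible piece of this refactoring is that FsVolumeImpl instances are now
built from a StorageDirectory via the new FsVolumeImplBuilder (added below)
rather than from a raw java.io.File. A sketch of how the builder is wired up,
assuming calling code inside the fsdataset.impl package (the setters are
package-private) and that the dataset, storage ID, storage directory and
configuration are already available:

    package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

    /** Sketch only: shows how the new builder replaces the File-based ctor. */
    class FsVolumeBuilderSketch {
      static FsVolumeImpl newVolume(FsDatasetImpl dataset, String storageID,
          StorageDirectory sd, Configuration conf) throws IOException {
        return new FsVolumeImplBuilder()
            .setDataset(dataset)
            .setStorageID(storageID)
            .setStorageDirectory(sd)  // replaces the old File currentDir argument
            .setConf(conf)
            .build();  // throws IOException if sd has no StorageLocation
      }
    }
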
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 57fab66..76af724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -23,11 +23,13 @@ import java.io.FileOutputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
 import java.util.Collections;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -56,13 +58,18 @@ import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
@@ -102,8 +109,14 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private final StorageType storageType;
   private final Map<String, BlockPoolSlice> bpSlices
       = new ConcurrentHashMap<String, BlockPoolSlice>();
+
+  // Refers to the base StorageLocation used to construct this volume
+  // (i.e., does not include STORAGE_DIR_CURRENT in
+  // <location>/STORAGE_DIR_CURRENT/)
+  private final StorageLocation storageLocation;
+
   private final File currentDir;    // <StorageDirectory>/current
-  private final DF usage;           
+  private final DF usage;
   private final long reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
@@ -124,19 +137,25 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   protected ThreadPoolExecutor cacheExecutor;
   
-  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
-      Configuration conf, StorageType storageType) throws IOException {
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+      Configuration conf) throws IOException {
+
+    if (sd.getStorageLocation() == null) {
+      throw new IOException("StorageLocation specified for storage directory " +
+          sd + " is null");
+    }
     this.dataset = dataset;
     this.storageID = storageID;
+    this.reservedForReplicas = new AtomicLong(0L);
+    this.storageLocation = sd.getStorageLocation();
+    this.currentDir = sd.getCurrentDir();
+    File parent = currentDir.getParentFile();
+    this.usage = new DF(parent, conf);
+    this.storageType = storageLocation.getStorageType();
     this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
         + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
-    this.reservedForReplicas = new AtomicLong(0L);
-    this.currentDir = currentDir;
-    File parent = currentDir.getParentFile();
-    this.usage = new DF(parent, conf);
-    this.storageType = storageType;
     this.configuredCapacity = -1;
     this.conf = conf;
     cacheExecutor = initializeCacheExecutor(parent);
@@ -285,19 +304,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return true;
   }
 
+  @VisibleForTesting
   File getCurrentDir() {
     return currentDir;
   }
   
-  File getRbwDir(String bpid) throws IOException {
+  protected File getRbwDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getRbwDir();
   }
 
-  File getLazyPersistDir(String bpid) throws IOException {
+  protected File getLazyPersistDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getLazypersistDir();
   }
 
-  File getTmpDir(String bpid) throws IOException {
+  protected File getTmpDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getTmpDir();
   }
 
@@ -448,6 +468,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return reserved;
   }
 
+  @VisibleForTesting
   BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp == null) {
@@ -457,21 +478,33 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public String getBasePath() {
-    return currentDir.getParent();
+  public URI getBaseURI() {
+    return new File(currentDir.getParent()).toURI();
   }
-  
+
   @Override
-  public boolean isTransientStorage() {
-    return storageType.isTransient();
+  public DF getUsageStats(Configuration conf) {
+    if (currentDir != null) {
+      try {
+        return new DF(new File(currentDir.getParent()), conf);
+      } catch (IOException e) {
+        LOG.error("Unable to get disk statistics for volume " + this);
+      }
+    }
+    return null;
   }
 
   @Override
-  public String getPath(String bpid) throws IOException {
-    return getBlockPoolSlice(bpid).getDirectory().getAbsolutePath();
+  public StorageLocation getStorageLocation() {
+    return storageLocation;
   }
 
   @Override
+  public boolean isTransientStorage() {
+    return storageType.isTransient();
+  }
+
+  @VisibleForTesting
   public File getFinalizedDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getFinalizedDir();
   }
@@ -951,7 +984,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   @Override
   public String toString() {
-    return currentDir.getAbsolutePath();
+    return currentDir != null ? currentDir.getParent() : "NULL";
   }
 
   void shutdown() {
@@ -1189,5 +1222,167 @@ public class FsVolumeImpl implements FsVolumeSpi {
         dstBlockFile, true, DFSUtilClient.getSmallBufferSize(conf), conf);
   }
 
+  @Override
+  public LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException {
+    return compileReport(getFinalizedDir(bpid),
+        getFinalizedDir(bpid), report, reportCompiler);
+  }
+
+  private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
+      File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException {
+
+    reportCompiler.throttle();
+
+    List <String> fileNames;
+    try {
+      fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
+    } catch (IOException ioe) {
+      LOG.warn("Exception occurred while compiling report: ", ioe);
+      // Initiate a check on disk failure.
+      dataset.datanode.checkDiskErrorAsync();
+      // Ignore this directory and proceed.
+      return report;
+    }
+    Collections.sort(fileNames);
+
+    /*
+     * Assumption: In the sorted list of files, the block file appears
+     * immediately before its block metadata file. This is true for the
+     * current naming convention: block file blk_<blockid> and meta file
+     * blk_<blockid>_<genstamp>.meta
+     */
+    for (int i = 0; i < fileNames.size(); i++) {
+      // Make sure this thread can make a timely exit. With a low throttle
+      // rate, completing a run can take a looooong time.
+      if (Thread.interrupted()) {
+        throw new InterruptedException();
+      }
+
+      File file = new File(dir, fileNames.get(i));
+      if (file.isDirectory()) {
+        compileReport(bpFinalizedDir, file, report, reportCompiler);
+        continue;
+      }
+      if (!Block.isBlockFilename(file)) {
+        if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
+          long blockId = Block.getBlockId(file.getName());
+          verifyFileLocation(file.getParentFile(), bpFinalizedDir,
+              blockId);
+          report.add(new ScanInfo(blockId, null, file, this));
+        }
+        continue;
+      }
+      File blockFile = file;
+      long blockId = Block.filename2id(file.getName());
+      File metaFile = null;
+
+      // Skip all the files that start with block name until
+      // getting to the metafile for the block
+      while (i + 1 < fileNames.size()) {
+        File blkMetaFile = new File(dir, fileNames.get(i + 1));
+        if (!(blkMetaFile.isFile()
+            && blkMetaFile.getName().startsWith(blockFile.getName()))) {
+          break;
+        }
+        i++;
+        if (isBlockMetaFile(blockFile.getName(), blkMetaFile.getName())) {
+          metaFile = blkMetaFile;
+          break;
+        }
+      }
+      verifyFileLocation(blockFile, bpFinalizedDir, blockId);
+      report.add(new ScanInfo(blockId, blockFile, metaFile, this));
+    }
+    return report;
+  }
+
+  /**
+   * Helper method to determine if a file name is consistent with a block
+   * meta-data file.
+   *
+   * @param blockId the block ID
+   * @param metaFile the file to check
+   * @return whether the file name is a block meta-data file name
+   */
+  private static boolean isBlockMetaFile(String blockId, String metaFile) {
+    return metaFile.startsWith(blockId)
+        && metaFile.endsWith(Block.METADATA_EXTENSION);
+  }
+
+  /**
+   * Verify whether the actual directory location of block file has the
+   * expected directory path computed using its block ID.
+   */
+  private void verifyFileLocation(File actualBlockFile,
+      File bpFinalizedDir, long blockId) {
+    File expectedBlockDir =
+        DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+    File actualBlockDir = actualBlockFile.getParentFile();
+    if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
+      LOG.warn("Block: " + blockId +
+          " found in invalid directory.  Expected directory: " +
+          expectedBlockDir + ".  Actual directory: " + actualBlockDir);
+    }
+  }
+
+  public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
+      ReplicaInfo replicaInfo,
+      int smallBufferSize,
+      Configuration conf) throws IOException {
+
+    File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
+        block.getGenerationStamp(), replicaInfo,
+        getTmpDir(block.getBlockPoolId()),
+        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
+
+    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
+        .setBlockId(replicaInfo.getBlockId())
+        .setGenerationStamp(replicaInfo.getGenerationStamp())
+        .setFsVolume(this)
+        .setDirectoryToUse(blockFiles[0].getParentFile())
+        .setBytesToReserve(0)
+        .build();
+    newReplicaInfo.setNumBytes(blockFiles[1].length());
+    return newReplicaInfo;
+  }
+
+  public File[] copyBlockToLazyPersistLocation(String bpId, long blockId,
+      long genStamp,
+      ReplicaInfo replicaInfo,
+      int smallBufferSize,
+      Configuration conf) throws IOException {
+
+    File lazyPersistDir  = getLazyPersistDir(bpId);
+    if (!lazyPersistDir.exists() && !lazyPersistDir.mkdirs()) {
+      FsDatasetImpl.LOG.warn("LazyWriter failed to create " + lazyPersistDir);
+      throw new IOException("LazyWriter fail to find or " +
+          "create lazy persist dir: " + lazyPersistDir.toString());
+    }
+
+    // No FsDatasetImpl lock for the file copy
+    File[] targetFiles = FsDatasetImpl.copyBlockFiles(
+        blockId, genStamp, replicaInfo, lazyPersistDir, true,
+        smallBufferSize, conf);
+    return targetFiles;
+  }
+
+  public void incrNumBlocks(String bpid) throws IOException {
+    getBlockPoolSlice(bpid).incrNumBlocks();
+  }
+
+  public void resolveDuplicateReplicas(String bpid, ReplicaInfo memBlockInfo,
+      ReplicaInfo diskBlockInfo, ReplicaMap volumeMap) throws IOException {
+    getBlockPoolSlice(bpid).resolveDuplicateReplicas(
+        memBlockInfo, diskBlockInfo, volumeMap);
+  }
+
+  public ReplicaInfo activateSavedReplica(String bpid,
+      ReplicaInfo replicaInfo, RamDiskReplica replicaState) throws IOException {
+    return getBlockPoolSlice(bpid).activateSavedReplica(replicaInfo,
+        replicaState);
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
new file mode 100644
index 0000000..a1f7e91
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+
+/**
+ * This class is to be used as a builder for {@link FsVolumeImpl} objects.
+ */
+public class FsVolumeImplBuilder {
+
+  private FsDatasetImpl dataset;
+  private String storageID;
+  private StorageDirectory sd;
+  private Configuration conf;
+
+  public FsVolumeImplBuilder() {
+    dataset = null;
+    storageID = null;
+    sd = null;
+    conf = null;
+  }
+
+  FsVolumeImplBuilder setDataset(FsDatasetImpl dataset) {
+    this.dataset = dataset;
+    return this;
+  }
+
+  FsVolumeImplBuilder setStorageID(String id) {
+    this.storageID = id;
+    return this;
+  }
+
+  FsVolumeImplBuilder setStorageDirectory(StorageDirectory sd) {
+    this.sd = sd;
+    return this;
+  }
+
+  FsVolumeImplBuilder setConf(Configuration conf) {
+    this.conf = conf;
+    return this;
+  }
+
+  FsVolumeImpl build() throws IOException {
+    return new FsVolumeImpl(dataset, storageID, sd, conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index f869008..cf9c319 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import java.io.File;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -51,8 +51,10 @@ class FsVolumeList {
   private final CopyOnWriteArrayList<FsVolumeImpl> volumes =
       new CopyOnWriteArrayList<>();
   // Tracks volume failures, sorted by volume path.
-  private final Map<String, VolumeFailureInfo> volumeFailureInfos =
-      Collections.synchronizedMap(new TreeMap<String, VolumeFailureInfo>());
+  // map from volume storage location to the volume failure info
+  private final Map<StorageLocation, VolumeFailureInfo> volumeFailureInfos =
+      Collections.synchronizedMap(
+          new TreeMap<StorageLocation, VolumeFailureInfo>());
   private final ConcurrentLinkedQueue<FsVolumeImpl> volumesBeingRemoved =
       new ConcurrentLinkedQueue<>();
   private final AutoCloseableLock checkDirsLock;
@@ -234,10 +236,9 @@ class FsVolumeList {
    *
    * @return list of all the failed volumes.
    */
-  Set<File> checkDirs() {
+  Set<StorageLocation> checkDirs() {
     try (AutoCloseableLock lock = checkDirsLock.acquire()) {
-      Set<File> failedVols = null;
-      
+      Set<StorageLocation> failedLocations = null;
       // Make a copy of volumes for performing modification 
       final List<FsVolumeImpl> volumeList = getVolumes();
 
@@ -247,10 +248,10 @@ class FsVolumeList {
           fsv.checkDirs();
         } catch (DiskErrorException e) {
           FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ", e);
-          if (failedVols == null) {
-            failedVols = new HashSet<>(1);
+          if (failedLocations == null) {
+            failedLocations = new HashSet<>(1);
           }
-          failedVols.add(new File(fsv.getBasePath()).getAbsoluteFile());
+          failedLocations.add(fsv.getStorageLocation());
           addVolumeFailureInfo(fsv);
           removeVolume(fsv);
         } catch (ClosedChannelException e) {
@@ -261,13 +262,13 @@ class FsVolumeList {
         }
       }
       
-      if (failedVols != null && failedVols.size() > 0) {
-        FsDatasetImpl.LOG.warn("Completed checkDirs. Found " + failedVols.size()
-            + " failure volumes.");
+      if (failedLocations != null && failedLocations.size() > 0) {
+        FsDatasetImpl.LOG.warn("Completed checkDirs. Found " +
+            failedLocations.size() + " failure volumes.");
       }
 
       waitVolumeRemoved(5000, checkDirsLockCondition);
-      return failedVols;
+      return failedLocations;
     }
   }
 
@@ -315,7 +316,7 @@ class FsVolumeList {
     }
     // If the volume is used to replace a failed volume, it needs to reset the
     // volume failure info for this volume.
-    removeVolumeFailureInfo(new File(volume.getBasePath()));
+    removeVolumeFailureInfo(volume.getStorageLocation());
     FsDatasetImpl.LOG.info("Added new volume: " +
         volume.getStorageID());
   }
@@ -351,16 +352,15 @@ class FsVolumeList {
    * @param volume the volume to be removed.
    * @param clearFailure set true to remove failure info for this volume.
    */
-  void removeVolume(File volume, boolean clearFailure) {
+  void removeVolume(StorageLocation storageLocation, boolean clearFailure) {
     for (FsVolumeImpl fsVolume : volumes) {
-      String basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
-      String targetPath = volume.getAbsolutePath();
-      if (basePath.equals(targetPath)) {
+      StorageLocation baseLocation = fsVolume.getStorageLocation();
+      if (baseLocation.equals(storageLocation)) {
         removeVolume(fsVolume);
       }
     }
     if (clearFailure) {
-      removeVolumeFailureInfo(volume);
+      removeVolumeFailureInfo(storageLocation);
     }
   }
 
@@ -394,13 +394,13 @@ class FsVolumeList {
 
   private void addVolumeFailureInfo(FsVolumeImpl vol) {
     addVolumeFailureInfo(new VolumeFailureInfo(
-        new File(vol.getBasePath()).getAbsolutePath(),
+        vol.getStorageLocation(),
         Time.now(),
         vol.getCapacity()));
   }
 
-  private void removeVolumeFailureInfo(File vol) {
-    volumeFailureInfos.remove(vol.getAbsolutePath());
+  private void removeVolumeFailureInfo(StorageLocation location) {
+    volumeFailureInfos.remove(location);
   }
 
   void addBlockPool(final String bpid, final Configuration conf) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
index 9e549f9..d6969c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
@@ -58,8 +58,8 @@ class RamDiskAsyncLazyPersistService {
   private final Configuration conf;
 
   private final ThreadGroup threadGroup;
-  private Map<File, ThreadPoolExecutor> executors
-      = new HashMap<File, ThreadPoolExecutor>();
+  private Map<String, ThreadPoolExecutor> executors
+      = new HashMap<String, ThreadPoolExecutor>();
   private final static HdfsConfiguration EMPTY_HDFS_CONF = new HdfsConfiguration();
 
   /**
@@ -75,13 +75,14 @@ class RamDiskAsyncLazyPersistService {
     this.threadGroup = new ThreadGroup(getClass().getSimpleName());
   }
 
-  private void addExecutorForVolume(final File volume) {
+  private void addExecutorForVolume(final String storageId) {
     ThreadFactory threadFactory = new ThreadFactory() {
 
       @Override
       public Thread newThread(Runnable r) {
         Thread t = new Thread(threadGroup, r);
-        t.setName("Async RamDisk lazy persist worker for volume " + volume);
+        t.setName("Async RamDisk lazy persist worker " +
+            " for volume with id " + storageId);
         return t;
       }
     };
@@ -93,39 +94,41 @@ class RamDiskAsyncLazyPersistService {
 
     // This can reduce the number of running threads
     executor.allowCoreThreadTimeOut(true);
-    executors.put(volume, executor);
+    executors.put(storageId, executor);
   }
 
   /**
    * Starts AsyncLazyPersistService for a new volume
    * @param volume the root of the new data volume.
    */
-  synchronized void addVolume(File volume) {
+  synchronized void addVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
       throw new RuntimeException("AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor != null) {
       throw new RuntimeException("Volume " + volume + " is already existed.");
     }
-    addExecutorForVolume(volume);
+    addExecutorForVolume(storageId);
   }
 
   /**
    * Stops AsyncLazyPersistService for a volume.
    * @param volume the root of the volume.
    */
-  synchronized void removeVolume(File volume) {
+  synchronized void removeVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
       throw new RuntimeException("AsyncDiskService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Can not find volume " + volume
-        + " to remove.");
+      throw new RuntimeException("Can not find volume with storage id " +
+          storageId + " to remove.");
     } else {
       executor.shutdown();
-      executors.remove(volume);
+      executors.remove(storageId);
     }
   }
 
@@ -135,25 +138,28 @@ class RamDiskAsyncLazyPersistService {
    * @return true if there is one thread pool for the volume
    *         false otherwise
    */
-  synchronized boolean queryVolume(File volume) {
+  synchronized boolean queryVolume(FsVolumeImpl volume) {
+    String storageId = volume.getStorageID();
     if (executors == null) {
-      throw new RuntimeException("AsyncLazyPersistService is already shutdown");
+      throw new RuntimeException(
+          "AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(volume);
+    ThreadPoolExecutor executor = executors.get(storageId);
     return (executor != null);
   }
 
   /**
    * Execute the task sometime in the future, using ThreadPools.
    */
-  synchronized void execute(File root, Runnable task) {
+  synchronized void execute(String storageId, Runnable task) {
     if (executors == null) {
-      throw new RuntimeException("AsyncLazyPersistService is already shutdown");
+      throw new RuntimeException(
+          "AsyncLazyPersistService is already shutdown");
     }
-    ThreadPoolExecutor executor = executors.get(root);
+    ThreadPoolExecutor executor = executors.get(storageId);
     if (executor == null) {
-      throw new RuntimeException("Cannot find root " + root
-          + " for execution of task " + task);
+      throw new RuntimeException("Cannot find root storage volume with id " +
+          storageId + " for execution of task " + task);
     } else {
       executor.execute(task);
     }
@@ -169,7 +175,7 @@ class RamDiskAsyncLazyPersistService {
     } else {
       LOG.info("Shutting down all async lazy persist service threads");
 
-      for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
+      for (Map.Entry<String, ThreadPoolExecutor> e : executors.entrySet()) {
         e.getValue().shutdown();
       }
       // clear the executor map so that calling execute again will fail.
@@ -189,18 +195,11 @@ class RamDiskAsyncLazyPersistService {
           + bpId + " block id: " + blockId);
     }
 
-    FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
-    File lazyPersistDir  = volume.getLazyPersistDir(bpId);
-    if (!lazyPersistDir.exists() && !lazyPersistDir.mkdirs()) {
-      FsDatasetImpl.LOG.warn("LazyWriter failed to create " + lazyPersistDir);
-      throw new IOException("LazyWriter fail to find or create lazy persist dir: "
-          + lazyPersistDir.toString());
-    }
-
     ReplicaLazyPersistTask lazyPersistTask = new ReplicaLazyPersistTask(
-        bpId, blockId, genStamp, creationTime, replica,
-        target, lazyPersistDir);
-    execute(volume.getCurrentDir(), lazyPersistTask);
+        bpId, blockId, genStamp, creationTime, replica, target);
+
+    FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
+    execute(volume.getStorageID(), lazyPersistTask);
   }
 
   class ReplicaLazyPersistTask implements Runnable {
@@ -210,19 +209,17 @@ class RamDiskAsyncLazyPersistService {
     private final long creationTime;
     private final ReplicaInfo replicaInfo;
     private final FsVolumeReference targetVolume;
-    private final File lazyPersistDir;
 
     ReplicaLazyPersistTask(String bpId, long blockId,
         long genStamp, long creationTime,
         ReplicaInfo replicaInfo,
-        FsVolumeReference targetVolume, File lazyPersistDir) {
+        FsVolumeReference targetVolume) {
       this.bpId = bpId;
       this.blockId = blockId;
       this.genStamp = genStamp;
       this.creationTime = creationTime;
       this.replicaInfo = replicaInfo;
       this.targetVolume = targetVolume;
-      this.lazyPersistDir = lazyPersistDir;
     }
 
     @Override
@@ -241,14 +238,14 @@ class RamDiskAsyncLazyPersistService {
       final FsDatasetImpl dataset = (FsDatasetImpl)datanode.getFSDataset();
       try (FsVolumeReference ref = this.targetVolume) {
         int smallBufferSize = DFSUtilClient.getSmallBufferSize(EMPTY_HDFS_CONF);
-        // No FsDatasetImpl lock for the file copy
-        File targetFiles[] = FsDatasetImpl.copyBlockFiles(
-            blockId, genStamp, replicaInfo, lazyPersistDir, true,
-            smallBufferSize, conf);
+
+        FsVolumeImpl volume = (FsVolumeImpl)ref.getVolume();
+        File[] targetFiles = volume.copyBlockToLazyPersistLocation(bpId,
+            blockId, genStamp, replicaInfo, smallBufferSize, conf);
 
         // Lock FsDataSetImpl during onCompleteLazyPersist callback
         dataset.onCompleteLazyPersist(bpId, blockId,
-                creationTime, targetFiles, (FsVolumeImpl)ref.getVolume());
+                creationTime, targetFiles, volume);
         succeeded = true;
       } catch (Exception e){
         FsDatasetImpl.LOG.warn(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
index c3ce2a4..a762785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+
 /**
  * Tracks information about failure of a data volume.
  */
 final class VolumeFailureInfo {
-  private final String failedStorageLocation;
+  private final StorageLocation failedStorageLocation;
   private final long failureDate;
   private final long estimatedCapacityLost;
 
@@ -33,7 +35,8 @@ final class VolumeFailureInfo {
    * @param failedStorageLocation storage location that has failed
    * @param failureDate date/time of failure in milliseconds since epoch
    */
-  public VolumeFailureInfo(String failedStorageLocation, long failureDate) {
+  public VolumeFailureInfo(StorageLocation failedStorageLocation,
+      long failureDate) {
     this(failedStorageLocation, failureDate, 0);
   }
 
@@ -44,8 +47,8 @@ final class VolumeFailureInfo {
    * @param failureDate date/time of failure in milliseconds since epoch
    * @param estimatedCapacityLost estimate of capacity lost in bytes
    */
-  public VolumeFailureInfo(String failedStorageLocation, long failureDate,
-      long estimatedCapacityLost) {
+  public VolumeFailureInfo(StorageLocation failedStorageLocation,
+      long failureDate, long estimatedCapacityLost) {
     this.failedStorageLocation = failedStorageLocation;
     this.failureDate = failureDate;
     this.estimatedCapacityLost = estimatedCapacityLost;
@@ -56,7 +59,7 @@ final class VolumeFailureInfo {
    *
    * @return storage location that has failed
    */
-  public String getFailedStorageLocation() {
+  public StorageLocation getFailedStorageLocation() {
     return this.failedStorageLocation;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0f4f14c..2471dc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5413,7 +5413,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
       if (volumeFailureSummary != null) {
         innerinfo
-            .put("failedStorageLocations",
+            .put("failedStorageIDs",
                 volumeFailureSummary.getFailedStorageLocations())
             .put("lastVolumeFailureDate",
                 volumeFailureSummary.getLastVolumeFailureDate())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index b11b48a..6efc53a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -216,13 +217,13 @@ public class TestNameNodePrunesMissingStorages {
         datanodeToRemoveStorageFromIdx++;
       }
       // Find the volume within the datanode which holds that first storage.
-      String volumeDirectoryToRemove = null;
+      StorageLocation volumeLocationToRemove = null;
       try (FsVolumeReferences volumes =
           datanodeToRemoveStorageFrom.getFSDataset().getFsVolumeReferences()) {
         assertEquals(NUM_STORAGES_PER_DN, volumes.size());
         for (FsVolumeSpi volume : volumes) {
           if (volume.getStorageID().equals(storageIdToRemove)) {
-            volumeDirectoryToRemove = volume.getBasePath();
+            volumeLocationToRemove = volume.getStorageLocation();
           }
         }
       };
@@ -230,10 +231,11 @@ public class TestNameNodePrunesMissingStorages {
       // Replace the volume directory with a regular file, which will
       // cause a volume failure.  (If we merely removed the directory,
       // it would be re-initialized with a new storage ID.)
-      assertNotNull(volumeDirectoryToRemove);
+      assertNotNull(volumeLocationToRemove);
       datanodeToRemoveStorageFrom.shutdown();
-      FileUtil.fullyDelete(new File(volumeDirectoryToRemove));
-      FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
+      FileUtil.fullyDelete(volumeLocationToRemove.getFile());
+      FileOutputStream fos = new FileOutputStream(
+          volumeLocationToRemove.getFile().toString());
       try {
         fos.write(1);
       } finally {
@@ -326,7 +328,8 @@ public class TestNameNodePrunesMissingStorages {
           dn.getFSDataset().getFsVolumeReferences();
       final String newStorageId = DatanodeStorage.generateUuid();
       try {
-        File currentDir = new File(volumeRefs.get(0).getBasePath(), "current");
+        File currentDir = new File(
+            volumeRefs.get(0).getStorageLocation().getFile(), "current");
         File versionFile = new File(currentDir, "VERSION");
         rewriteVersionFile(versionFile, newStorageId);
       } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 6034d1e..6c59231 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -22,7 +22,9 @@ import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -38,6 +40,7 @@ import javax.management.StandardMBean;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -46,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -495,21 +499,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
-    public String getBasePath() {
-      return null;
-    }
-
-    @Override
-    public String getPath(String bpid) throws IOException {
-      return null;
-    }
-
-    @Override
-    public File getFinalizedDir(String bpid) throws IOException {
-      return null;
-    }
-
-    @Override
     public StorageType getStorageType() {
       return null;
     }
@@ -546,6 +535,28 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     public FsDatasetSpi getDataset() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public StorageLocation getStorageLocation() {
+      return null;
+    }
+
+    @Override
+    public URI getBaseURI() {
+      return null;
+    }
+
+    @Override
+    public DF getUsageStats(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    public LinkedList<ScanInfo> compileReport(String bpid,
+        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException, IOException {
+      return null;
+    }
   }
 
   private final Map<String, Map<Block, BInfo>> blockMap
@@ -1030,7 +1041,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
     // nothing to check for simulated data set
     return null;
   }
@@ -1344,7 +1355,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public synchronized void removeVolumes(Set<File> volumes, boolean clearFailure) {
+  public synchronized void removeVolumes(Collection<StorageLocation> volumes,
+      boolean clearFailure) {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
index 021361b..c55a828 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
@@ -549,7 +549,8 @@ public class TestBlockScanner {
       info.shouldRun = false;
     }
     ctx.datanode.shutdown();
-    String vPath = ctx.volumes.get(0).getBasePath();
+    String vPath = ctx.volumes.get(0).getStorageLocation()
+        .getFile().getAbsolutePath();
     File cursorPath = new File(new File(new File(vPath, "current"),
           ctx.bpids[0]), "scanner.cursor");
     assertTrue("Failed to find cursor save file in " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 0dbb09c..06387c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -52,7 +52,6 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -519,11 +518,8 @@ public class TestDataNodeHotSwapVolumes {
     ExtendedBlock block =
         DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
     FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
-    String basePath = volumeWithBlock.getBasePath();
-    File storageDir = new File(basePath);
-    URI fileUri = storageDir.toURI();
-    String dirWithBlock =
-        "[" + volumeWithBlock.getStorageType() + "]" + fileUri;
+    String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]" +
+        volumeWithBlock.getStorageLocation().getFile().toURI();
     String newDirs = dirWithBlock;
     for (String dir : oldDirs) {
       if (dirWithBlock.startsWith(dir)) {
@@ -581,8 +577,8 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : volumes) {
-        assertThat(volume.getBasePath(), is(not(anyOf(
-            is(newDirs.get(0)), is(newDirs.get(2))))));
+        assertThat(volume.getStorageLocation().getFile().toString(),
+            is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
       }
     }
     DataStorage storage = dn.getStorage();
@@ -765,7 +761,7 @@ public class TestDataNodeHotSwapVolumes {
     try (FsDatasetSpi.FsVolumeReferences volumes =
       dn.getFSDataset().getFsVolumeReferences()) {
       for (FsVolumeSpi vol : volumes) {
-        if (vol.getBasePath().equals(basePath.getPath())) {
+        if (vol.getBaseURI().equals(basePath.toURI())) {
           return (FsVolumeImpl) vol;
         }
       }
@@ -810,6 +806,7 @@ public class TestDataNodeHotSwapVolumes {
     assertEquals(used, failedVolume.getDfsUsed());
 
     DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
+    LOG.info("reconfiguring DN ");
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6792ba8..47f4823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
@@ -254,17 +253,18 @@ public class TestDataNodeVolumeFailure {
     FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
     try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
       for (FsVolumeSpi volume : vols) {
-        assertNotEquals(new File(volume.getBasePath()).getAbsoluteFile(),
-            dn0Vol1.getAbsoluteFile());
+        assertFalse(volume.getStorageLocation().getFile()
+            .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
+        ));
       }
     }
 
     // 3. all blocks on dn0Vol1 have been removed.
     for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
       assertNotNull(replica.getVolume());
-      assertNotEquals(
-          new File(replica.getVolume().getBasePath()).getAbsoluteFile(),
-          dn0Vol1.getAbsoluteFile());
+      assertFalse(replica.getVolume().getStorageLocation().getFile()
+          .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()
+      ));
     }
 
     // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 8d021cd..4bb5e7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -539,6 +539,16 @@ public class TestDataNodeVolumeFailureReporting {
     assertCounter("VolumeFailures", expectedVolumeFailuresCounter,
         getMetrics(dn.getMetrics().name()));
     FsDatasetSpi<?> fsd = dn.getFSDataset();
+    StringBuilder strBuilder = new StringBuilder();
+    strBuilder.append("expectedFailedVolumes is ");
+    for (String expected: expectedFailedVolumes) {
+      strBuilder.append(expected + ",");
+    }
+    strBuilder.append(" fsd.getFailedStorageLocations() is ");
+    for (String expected: fsd.getFailedStorageLocations()) {
+      strBuilder.append(expected + ",");
+    }
+    LOG.info(strBuilder.toString());
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
     assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
     if (expectedFailedVolumes.length > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 576aae0..08a5af9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
@@ -44,6 +45,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -56,11 +58,13 @@ import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -185,18 +189,20 @@ public class TestDirectoryScanner {
           // Volume without a copy of the block. Make a copy now.
           File sourceBlock = new File(b.getBlockURI());
           File sourceMeta = new File(b.getMetadataURI());
-          String sourceRoot = b.getVolume().getBasePath();
-          String destRoot = v.getBasePath();
+          URI sourceRoot = b.getVolume().getStorageLocation().getFile().toURI();
+          URI destRoot = v.getStorageLocation().getFile().toURI();
 
           String relativeBlockPath =
-              new File(sourceRoot).toURI().relativize(sourceBlock.toURI())
+              sourceRoot.relativize(sourceBlock.toURI())
                   .getPath();
           String relativeMetaPath =
-              new File(sourceRoot).toURI().relativize(sourceMeta.toURI())
+              sourceRoot.relativize(sourceMeta.toURI())
                   .getPath();
 
-          File destBlock = new File(destRoot, relativeBlockPath);
-          File destMeta = new File(destRoot, relativeMetaPath);
+          File destBlock = new File(new File(destRoot).toString(),
+              relativeBlockPath);
+          File destMeta = new File(new File(destRoot).toString(),
+              relativeMetaPath);
 
           destBlock.getParentFile().mkdirs();
           FileUtils.copyFile(sourceBlock, destBlock);
@@ -238,7 +244,8 @@ public class TestDirectoryScanner {
     try (FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) {
       int numVolumes = volumes.size();
       int index = rand.nextInt(numVolumes - 1);
-      File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
+      File finalizedDir = ((FsVolumeImpl) volumes.get(index))
+          .getFinalizedDir(bpid);
       File file = new File(finalizedDir, getBlockFile(id));
       if (file.createNewFile()) {
         LOG.info("Created block file " + file.getName());
@@ -253,8 +260,8 @@ public class TestDirectoryScanner {
     try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
       int numVolumes = refs.size();
       int index = rand.nextInt(numVolumes - 1);
-
-      File finalizedDir = refs.get(index).getFinalizedDir(bpid);
+      File finalizedDir = ((FsVolumeImpl) refs.get(index))
+          .getFinalizedDir(bpid);
       File file = new File(finalizedDir, getMetaFile(id));
       if (file.createNewFile()) {
         LOG.info("Created metafile " + file.getName());
@@ -271,7 +278,8 @@ public class TestDirectoryScanner {
       int numVolumes = refs.size();
       int index = rand.nextInt(numVolumes - 1);
 
-      File finalizedDir = refs.get(index).getFinalizedDir(bpid);
+      File finalizedDir =
+          ((FsVolumeImpl) refs.get(index)).getFinalizedDir(bpid);
       File file = new File(finalizedDir, getBlockFile(id));
       if (file.createNewFile()) {
         LOG.info("Created block file " + file.getName());
@@ -311,7 +319,7 @@ public class TestDirectoryScanner {
     scanner.reconcile();
     
     assertTrue(scanner.diffs.containsKey(bpid));
-    LinkedList<DirectoryScanner.ScanInfo> diff = scanner.diffs.get(bpid);
+    LinkedList<FsVolumeSpi.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
     
@@ -820,17 +828,6 @@ public class TestDirectoryScanner {
       return 0;
     }
     
-    @Override
-    public String getBasePath() {
-      return (new File("/base")).getAbsolutePath();
-    }
-    
-    @Override
-    public String getPath(String bpid) throws IOException {
-      return (new File("/base/current/" + bpid)).getAbsolutePath();
-    }
-
-    @Override
     public File getFinalizedDir(String bpid) throws IOException {
       return new File("/base/current/" + bpid + "/finalized");
     }
@@ -877,6 +874,29 @@ public class TestDirectoryScanner {
     public FsDatasetSpi getDataset() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public StorageLocation getStorageLocation() {
+      return null;
+    }
+
+    @Override
+    public URI getBaseURI() {
+      return (new File("/base")).toURI();
+    }
+
+    @Override
+    public DF getUsageStats(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    public LinkedList<ScanInfo> compileReport(String bpid,
+        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+        throws InterruptedException, IOException {
+      return null;
+    }
+
   }
 
   private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();
@@ -887,8 +907,8 @@ public class TestDirectoryScanner {
       
   void testScanInfoObject(long blockId, File blockFile, File metaFile)
       throws Exception {
-    DirectoryScanner.ScanInfo scanInfo =
-        new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
+    FsVolumeSpi.ScanInfo scanInfo =
+        new FsVolumeSpi.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
     assertEquals(blockId, scanInfo.getBlockId());
     if (blockFile != null) {
       assertEquals(blockFile.getAbsolutePath(),
@@ -906,8 +926,8 @@ public class TestDirectoryScanner {
   }
   
   void testScanInfoObject(long blockId) throws Exception {
-    DirectoryScanner.ScanInfo scanInfo =
-        new DirectoryScanner.ScanInfo(blockId, null, null, null);
+    FsVolumeSpi.ScanInfo scanInfo =
+        new FsVolumeSpi.ScanInfo(blockId, null, null, null);
     assertEquals(blockId, scanInfo.getBlockId());
     assertNull(scanInfo.getBlockFile());
     assertNull(scanInfo.getMetaFile());
@@ -963,8 +983,8 @@ public class TestDirectoryScanner {
       List<FsVolumeSpi> volumes = new ArrayList<>();
       Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
       while (iterator.hasNext()) {
-        FsVolumeSpi volume = iterator.next();
-        FsVolumeSpi spy = Mockito.spy(volume);
+        FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
+        FsVolumeImpl spy = Mockito.spy(volume);
         Mockito.doThrow(new IOException("Error while getFinalizedDir"))
             .when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
         volumes.add(spy);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 86d2ff4..2103392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -199,7 +199,7 @@ public class TestDiskError {
       try (FsDatasetSpi.FsVolumeReferences volumes =
           dn.getFSDataset().getFsVolumeReferences()) {
         for (FsVolumeSpi vol : volumes) {
-          String dir = vol.getBasePath();
+          String dir = vol.getStorageLocation().getFile().getAbsolutePath();
           Path dataDir = new Path(dir);
           FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
           assertEquals("Permission for dir: " + dataDir + ", is " + actual +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 1268108..7b7f04f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -56,12 +56,14 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos) throws IOException {
-
+  public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos)
+      throws IOException {
   }
 
   @Override
-  public void removeVolumes(Set<File> volumes, boolean clearFailure) {
+  public void removeVolumes(Collection<StorageLocation> volumes,
+      boolean clearFailure) {
+    throw new UnsupportedOperationException();
   }
 
   @Override
@@ -242,7 +244,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public Set<File> checkDataDir() {
+  public Set<StorageLocation> checkDataDir() {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
index 985a259..83d6c4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.hdfs.server.datanode.extdataset;
 
-import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
+import java.util.LinkedList;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -44,21 +49,6 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public String getBasePath() {
-    return null;
-  }
-
-  @Override
-  public String getPath(String bpid) throws IOException {
-    return null;
-  }
-
-  @Override
-  public File getFinalizedDir(String bpid) throws IOException {
-    return null;
-  }
-
-  @Override
   public String getStorageID() {
     return null;
   }
@@ -100,4 +90,26 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   public FsDatasetSpi getDataset() {
     return null;
   }
+
+  @Override
+  public StorageLocation getStorageLocation() {
+    return null;
+  }
+
+  @Override
+  public URI getBaseURI() {
+    return null;
+  }
+
+  @Override
+  public DF getUsageStats(Configuration conf) {
+    return null;
+  }
+
+  @Override
+  public LinkedList<ScanInfo> compileReport(String bpid,
+      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException, IOException {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index a465c05..07ddb59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -374,9 +374,12 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   public long getRawCapacity() throws IOException {
     try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
       Preconditions.checkState(volRefs.size() != 0);
-      DF df = new DF(new File(volRefs.get(0).getBasePath()),
-          dataset.datanode.getConf());
-      return df.getCapacity();
+      DF df = volRefs.get(0).getUsageStats(dataset.datanode.getConf());
+      if (df != null) {
+        return df.getCapacity();
+      } else {
+        return -1;
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 179b617..e48aae0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DNConf;
@@ -50,7 +51,9 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.MultipleIOException;
@@ -122,8 +125,10 @@ public class TestFsDatasetImpl {
   
   private final static String BLOCKPOOL = "BP-TEST";
 
-  private static Storage.StorageDirectory createStorageDirectory(File root) {
-    Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
+  private static Storage.StorageDirectory createStorageDirectory(File root)
+      throws SecurityException, IOException {
+    Storage.StorageDirectory sd = new Storage.StorageDirectory(
+        StorageLocation.parse(root.toURI().toString()));
     DataStorage.createStorageID(sd, false);
     return sd;
   }
@@ -196,16 +201,18 @@ public class TestFsDatasetImpl {
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
       String pathUri = new Path(path).toUri().toString();
-      expectedVolumes.add(new File(pathUri).toString());
+      expectedVolumes.add(new File(pathUri).getAbsolutePath());
       StorageLocation loc = StorageLocation.parse(pathUri);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
           new DataStorage.VolumeBuilder(storage, sd);
-      when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+      when(storage.prepareVolume(eq(datanode), eq(loc),
           anyListOf(NamespaceInfo.class)))
           .thenReturn(builder);
 
       dataset.addVolume(loc, nsInfos);
+      LOG.info("expectedVolumes " + i + " is " +
+          new File(pathUri).getAbsolutePath());
     }
 
     assertEquals(totalVolumes, getNumVolumes());
@@ -215,7 +222,9 @@ public class TestFsDatasetImpl {
     try (FsDatasetSpi.FsVolumeReferences volumes =
         dataset.getFsVolumeReferences()) {
       for (int i = 0; i < numNewVolumes; i++) {
-        actualVolumes.add(volumes.get(numExistingVolumes + i).getBasePath());
+        String volumeName = volumes.get(numExistingVolumes + i).toString();
+        actualVolumes.add(volumeName);
+        LOG.info("actualVolume " + i + " is " + volumeName);
       }
     }
     assertEquals(actualVolumes.size(), expectedVolumes.size());
@@ -262,9 +271,18 @@ public class TestFsDatasetImpl {
     final String[] dataDirs =
         conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
     final String volumePathToRemove = dataDirs[0];
-    Set<File> volumesToRemove = new HashSet<>();
-    volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());
-
+    Set<StorageLocation> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
+
+    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
+    FsVolumeImpl volumeToRemove = null;
+    for (FsVolumeSpi vol: volReferences) {
+      if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
+        volumeToRemove = (FsVolumeImpl) vol;
+      }
+    }
+    assertTrue(volumeToRemove != null);
+    volReferences.close();
     dataset.removeVolumes(volumesToRemove, true);
     int expectedNumVolumes = dataDirs.length - 1;
     assertEquals("The volume has been removed from the volumeList.",
@@ -273,7 +291,7 @@ public class TestFsDatasetImpl {
         expectedNumVolumes, dataset.storageMap.size());
 
     try {
-      dataset.asyncDiskService.execute(volumesToRemove.iterator().next(),
+      dataset.asyncDiskService.execute(volumeToRemove,
           new Runnable() {
             @Override
             public void run() {}
@@ -281,7 +299,7 @@ public class TestFsDatasetImpl {
       fail("Expect RuntimeException: the volume has been removed from the "
            + "AsyncDiskService.");
     } catch (RuntimeException e) {
-      GenericTestUtils.assertExceptionContains("Cannot find root", e);
+      GenericTestUtils.assertExceptionContains("Cannot find volume", e);
     }
 
     int totalNumReplicas = 0;
@@ -306,7 +324,7 @@ public class TestFsDatasetImpl {
     Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
-    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+    when(storage.prepareVolume(eq(datanode), eq(loc),
         anyListOf(NamespaceInfo.class)))
         .thenReturn(builder);
 
@@ -315,8 +333,8 @@ public class TestFsDatasetImpl {
 
     when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
     when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
-    Set<File> volumesToRemove = new HashSet<>();
-    volumesToRemove.add(loc.getFile());
+    Set<StorageLocation> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(loc);
     dataset.removeVolumes(volumesToRemove, true);
     assertEquals(numExistingVolumes, getNumVolumes());
   }
@@ -336,7 +354,8 @@ public class TestFsDatasetImpl {
     for (int i = 0; i < NUM_VOLUMES; i++) {
       FsVolumeImpl volume = mock(FsVolumeImpl.class);
       oldVolumes.add(volume);
-      when(volume.getBasePath()).thenReturn("data" + i);
+      when(volume.getStorageLocation()).thenReturn(
+          StorageLocation.parse(new File("data" + i).toURI().toString()));
       when(volume.checkClosed()).thenReturn(true);
       FsVolumeReference ref = mock(FsVolumeReference.class);
       when(ref.getVolume()).thenReturn(volume);
@@ -348,13 +367,16 @@ public class TestFsDatasetImpl {
     final FsVolumeImpl newVolume = mock(FsVolumeImpl.class);
     final FsVolumeReference newRef = mock(FsVolumeReference.class);
     when(newRef.getVolume()).thenReturn(newVolume);
-    when(newVolume.getBasePath()).thenReturn("data4");
+    when(newVolume.getStorageLocation()).thenReturn(
+        StorageLocation.parse(new File("data4").toURI().toString()));
     FsVolumeImpl blockedVolume = volumeList.getVolumes().get(1);
     doAnswer(new Answer() {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume(new File("data4"), false);
+        volumeList.removeVolume(
+            StorageLocation.parse((new File("data4")).toURI().toString()),
+            false);
         volumeList.addVolume(newRef);
         return null;
       }
@@ -386,7 +408,8 @@ public class TestFsDatasetImpl {
     File badDir = new File(BASE_DIR, "bad");
     badDir.mkdirs();
     doReturn(mockVolume).when(spyDataset)
-        .createFsVolume(anyString(), any(File.class), any(StorageType.class));
+        .createFsVolume(anyString(), any(StorageDirectory.class),
+            any(StorageLocation.class));
     doThrow(new IOException("Failed to getVolumeMap()"))
       .when(mockVolume).getVolumeMap(
         anyString(),
@@ -396,7 +419,8 @@ public class TestFsDatasetImpl {
     Storage.StorageDirectory sd = createStorageDirectory(badDir);
     sd.lock();
     DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
-    when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
+    when(storage.prepareVolume(eq(datanode),
+        eq(StorageLocation.parse(badDir.toURI().toString())),
         Matchers.<List<NamespaceInfo>>any()))
         .thenReturn(builder);
 
@@ -540,7 +564,7 @@ public class TestFsDatasetImpl {
     DataStorage.VolumeBuilder builder =
         new DataStorage.VolumeBuilder(storage, sd);
     when(
-        storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        storage.prepareVolume(eq(datanode), eq(loc),
             anyListOf(NamespaceInfo.class))).thenReturn(builder);
 
     String cacheFilePath =
@@ -584,7 +608,7 @@ public class TestFsDatasetImpl {
     return dfsUsed;
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testRemoveVolumeBeingWritten() throws Exception {
     // Will write and remove on dn0.
     final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
@@ -636,10 +660,9 @@ public class TestFsDatasetImpl {
 
     class VolRemoveThread extends Thread {
       public void run() {
-        Set<File> volumesToRemove = new HashSet<>();
+        Set<StorageLocation> volumesToRemove = new HashSet<>();
         try {
-          volumesToRemove.add(StorageLocation.parse(
-              dataset.getVolume(eb).getBasePath()).getFile());
+          volumesToRemove.add(dataset.getVolume(eb).getStorageLocation());
         } catch (Exception e) {
           LOG.info("Problem preparing volumes to remove: ", e);
           Assert.fail("Exception in remove volume thread, check log for " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 3d4c38c..6eff300 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -22,7 +22,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
@@ -71,8 +73,13 @@ public class TestFsVolumeList {
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "nextvolume-" + i);
       curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
+      FsVolumeImpl volume = new FsVolumeImplBuilder()
+          .setConf(conf)
+          .setDataset(dataset)
+          .setStorageID("storage-id")
+          .setStorageDirectory(
+              new StorageDirectory(StorageLocation.parse(curDir.getPath())))
+          .build();
       volume.setCapacityForTesting(1024 * 1024 * 1024);
       volumes.add(volume);
       volumeList.addVolume(volume.obtainReference());
@@ -109,8 +116,13 @@ public class TestFsVolumeList {
     for (int i = 0; i < 3; i++) {
       File curDir = new File(baseDir, "volume-" + i);
       curDir.mkdirs();
-      FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
-          conf, StorageType.DEFAULT);
+      FsVolumeImpl volume = new FsVolumeImplBuilder()
+          .setConf(conf)
+          .setDataset(dataset)
+          .setStorageID("storage-id")
+          .setStorageDirectory(
+              new StorageDirectory(StorageLocation.parse(curDir.getPath())))
+          .build();
       volumes.add(volume);
       volumeList.addVolume(volume.obtainReference());
     }
@@ -139,8 +151,13 @@ public class TestFsVolumeList {
         Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
     File volDir = new File(baseDir, "volume-0");
     volDir.mkdirs();
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DEFAULT);
+    FsVolumeImpl volume = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(
+            new StorageDirectory(StorageLocation.parse(volDir.getPath())))
+        .build();
     FsVolumeReference ref = volume.obtainReference();
     volumeList.addVolume(ref);
     assertNull(ref.getVolume());
@@ -155,8 +172,13 @@ public class TestFsVolumeList {
     volDir.mkdirs();
     // when storage type reserved is not configured,should consider
     // dfs.datanode.du.reserved.
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
-        StorageType.RAM_DISK);
+    FsVolumeImpl volume = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume.getReserved());
     // when storage type reserved is configured.
     conf.setLong(
@@ -165,17 +187,37 @@ public class TestFsVolumeList {
     conf.setLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
             + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
-    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.RAM_DISK);
+    FsVolumeImpl volume1 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 1L, volume1.getReserved());
-    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.SSD);
+    FsVolumeImpl volume2 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[SSD]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 2L, volume2.getReserved());
-    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DISK);
+    FsVolumeImpl volume3 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse("[DISK]"+volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume3.getReserved());
-    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
-        conf, StorageType.DEFAULT);
+    FsVolumeImpl volume4 = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse(volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     assertEquals("", 100L, volume4.getReserved());
   }
 
@@ -197,8 +239,13 @@ public class TestFsVolumeList {
     long actualNonDfsUsage = 300L;
     long reservedForReplicas = 50L;
     conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, duReserved);
-    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
-        StorageType.DEFAULT);
+    FsVolumeImpl volume = new FsVolumeImplBuilder().setDataset(dataset)
+        .setStorageDirectory(
+            new StorageDirectory(
+                StorageLocation.parse(volDir.getPath())))
+        .setStorageID("storage-id")
+        .setConf(conf)
+        .build();
     FsVolumeImpl spyVolume = Mockito.spy(volume);
     // Set Capacity for testing
     long testCapacity = diskCapacity - duReserved;
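
The builder-based construction above leans on StorageLocation.parse, which accepts an optional [STORAGE_TYPE] prefix ahead of the path. A small sketch of that parsing, with illustrative local paths; when no prefix is given the location falls back to the default storage type:

  import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

  class StorageLocationParseSketch {
    static void demo() throws Exception {
      // Prefixed form carries the storage type, as in the test cases above.
      StorageLocation ssd = StorageLocation.parse("[SSD]/data/ssd0");
      // Unprefixed form, as used for the plain DISK/DEFAULT volumes.
      StorageLocation disk = StorageLocation.parse("/data/disk0");
      System.out.println(ssd.getStorageType() + " vs " + disk.getStorageType());
    }
  }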

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index 794a887..7df0333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -331,8 +331,8 @@ public class TestDiskBalancerWithMockMover {
         .getFsVolumeReferences();
 
     nodeID = dataNode.getDatanodeUuid();
-    sourceName = references.get(0).getBasePath();
-    destName = references.get(1).getBasePath();
+    sourceName = references.get(0).getBaseURI().getPath();
+    destName = references.get(1).getBaseURI().getPath();
     sourceUUID = references.get(0).getStorageID();
     destUUID = references.get(1).getStorageID();
     references.close();
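
The hunks above all apply the same migration: FsVolumeSpi#getBasePath() and getPath(bpid) are gone, and callers go through the volume's StorageLocation or its base URI instead. A minimal sketch of the replacement pattern, assuming a volume obtained from FsDatasetSpi.FsVolumeReferences as in these tests:

  import java.net.URI;
  import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
  import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

  class VolumePathSketch {
    // Old code called vol.getBasePath(); the volume root now comes from the
    // StorageLocation backing the volume.
    static String basePathOf(FsVolumeSpi vol) {
      StorageLocation loc = vol.getStorageLocation();
      return loc.getFile().getAbsolutePath();
    }

    // Where only a URI comparison is needed, getBaseURI() avoids the File hop.
    static URI baseUriOf(FsVolumeSpi vol) {
      return vol.getBaseURI();
    }
  }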



[27/51] [abbrv] hadoop git commit: HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei)

Posted by ae...@apache.org.
HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809cfd27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809cfd27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809cfd27

Branch: refs/heads/HDFS-7240
Commit: 809cfd27a30900d2c0e0e133574de49d0b4538cf
Parents: ecb51b8
Author: Lei Xu <le...@apache.org>
Authored: Tue Oct 11 10:04:46 2016 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue Oct 11 10:04:46 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809cfd27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 3798394..d112a48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -369,7 +369,7 @@ public interface HdfsServerConstants {
   String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
       "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
-      "raw.hdfs.erasurecoding.policy";
+      "system.hdfs.erasurecoding.policy";
 
   long BLOCK_GROUP_INDEX_MASK = 15;
   byte MAX_BLOCKS_IN_GROUP = 16;
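
The renamed constant is internal bookkeeping; applications are not expected to read system.* xattrs directly. As a hedged sketch (not part of this patch), client code queries the policy through the public API instead; dfs is an already-open DistributedFileSystem and the path is illustrative:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

  class EcPolicyLookupSketch {
    static ErasureCodingPolicy policyOf(DistributedFileSystem dfs) throws Exception {
      // Returns null when no erasure coding policy applies to the path.
      return dfs.getErasureCodingPolicy(new Path("/striped/dir"));
    }
  }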



[49/51] [abbrv] hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

Posted by ae...@apache.org.
HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a85d079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a85d079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a85d079

Branch: refs/heads/HDFS-7240
Commit: 0a85d079838f532a13ca237300386d1b3bc1b178
Parents: 8c721aa
Author: Jing Zhao <ji...@apache.org>
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Thu Oct 13 13:24:54 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/tools/DistCpConstants.java    | 12 +++++-
 .../hadoop/tools/mapred/CopyCommitter.java      |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +++++++++-----------
 .../hadoop/tools/util/DistCpTestUtils.java      | 32 ++++++++------
 4 files changed, 56 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 96f364c..6171aa9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -125,9 +127,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+      DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
    * Value of reserved raw HDFS directory when copying raw.* xattrs.
    */
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
     List<Path> targets = new ArrayList<Path>(1);
     Path targetFinalPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
     targets.add(targetFinalPath);
-    DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+    Path resultNonePath = Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+        .toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+        ? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+    DistCpOptions options = new DistCpOptions(targets, resultNonePath);
     //
     // Set up options to be the same from the CopyListing.buildListing's perspective,
     // so to collect similar listings as when doing the copy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
     final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
     doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
         DistCpConstants.SUCCESS);
-    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-px",
-        false, true, DistCpConstants.SUCCESS);
-    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-px",
-        false, true, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-px",
-        false, true, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-px",
-        true, true, DistCpConstants.SUCCESS);
+    doTestStandardPreserveRawXAttrs("-px", true);
     final Path savedWd = fs.getWorkingDirectory();
     try {
       fs.setWorkingDirectory(new Path("/.reserved/raw"));
@@ -103,27 +96,18 @@ public class TestDistCpWithRawXAttrs {
   /* Test that XAttrs are not preserved and raw.* are when appropriate. */
   @Test
   public void testPreserveRawXAttrs2() throws Exception {
-    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-p",
-        false, false, DistCpConstants.SUCCESS);
-    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-p",
-        false, false, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-p",
-        false, false, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-p",
-        true, false, DistCpConstants.SUCCESS);
+    doTestStandardPreserveRawXAttrs("-p", false);
   }
 
   /* Test that XAttrs are not preserved and raw.* are when appropriate. */
   @Test
   public void testPreserveRawXAttrs3() throws Exception {
-    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, null,
-        false, false, DistCpConstants.SUCCESS);
-    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, null,
-        false, false, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, null,
-        false, false, DistCpConstants.INVALID_ARGUMENT);
-    doTestPreserveRawXAttrs(rawSrcName, rawDestName, null,
-        true, false, DistCpConstants.SUCCESS);
+    doTestStandardPreserveRawXAttrs(null, false);
+  }
+
+  @Test
+  public void testPreserveRawXAttrs4() throws Exception {
+    doTestStandardPreserveRawXAttrs("-update -delete", false);
   }
 
   private static Path[] pathnames = { new Path("dir1"),
@@ -145,6 +129,19 @@ public class TestDistCpWithRawXAttrs {
     }
   }
 
+  private void doTestStandardPreserveRawXAttrs(String options,
+      boolean expectUser)
+      throws Exception {
+    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, options,
+        false, expectUser, DistCpConstants.SUCCESS);
+    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, options,
+        false, expectUser, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, options,
+        false, expectUser, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rawDestName, options,
+        true, expectUser, DistCpConstants.SUCCESS);
+  }
+
   private void doTestPreserveRawXAttrs(String src, String dest,
       String preserveOpts, boolean expectRaw, boolean expectUser,
       int expectedExitCode) throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
index 2721638..624f7d5 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
@@ -18,21 +18,20 @@
 
 package org.apache.hadoop.tools.util;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.util.ToolRunner;
 
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Utility class for DistCpTests
  */
@@ -79,10 +78,19 @@ public class DistCpTestUtils {
   public static void assertRunDistCp(int exitCode, String src, String dst,
       String options, Configuration conf)
       throws Exception {
+    assertRunDistCp(exitCode, src, dst,
+        options == null ? new String[0] : options.trim().split(" "), conf);
+  }
+
+  private static void assertRunDistCp(int exitCode, String src, String dst,
+      String[] options, Configuration conf)
+      throws Exception {
     DistCp distCp = new DistCp(conf, null);
-    String[] optsArr = options == null ?
-        new String[] { src, dst } :
-        new String[] { options, src, dst };
+    String[] optsArr = new String[options.length + 2];
+    System.arraycopy(options, 0, optsArr, 0, options.length);
+    optsArr[optsArr.length - 2] = src;
+    optsArr[optsArr.length - 1] = dst;
+
     assertEquals(exitCode,
         ToolRunner.run(conf, distCp, optsArr));
   }
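
With the option string now split into individual arguments, multi-flag runs such as "-update -delete" can be exercised against /.reserved/raw paths. A minimal sketch of driving DistCp the same way outside the test harness; the NameNode addresses and paths are illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.tools.DistCp;
  import org.apache.hadoop.util.ToolRunner;

  class RawDistCpSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Both endpoints must be /.reserved/raw paths for raw.* xattrs to be
      // preserved, per the tests above.
      String src = "hdfs://nn1:8020/.reserved/raw/src";
      String dst = "hdfs://nn2:8020/.reserved/raw/dest";
      int rc = ToolRunner.run(conf, new DistCp(conf, null),
          new String[] {"-update", "-delete", src, dst});
      System.exit(rc);
    }
  }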



[12/51] [abbrv] hadoop git commit: HADOOP-12579. Deprecate WritableRpcEngine. Contributed by Wei Zhou

Posted by ae...@apache.org.
HADOOP-12579. Deprecate WritableRpcEngine. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec0b7071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec0b7071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec0b7071

Branch: refs/heads/HDFS-7240
Commit: ec0b70716c8e6509654a3975d3ca139a0144cc8e
Parents: 4d10621
Author: Kai Zheng <ka...@intel.com>
Authored: Sun Oct 9 15:07:03 2016 +0600
Committer: Kai Zheng <ka...@intel.com>
Committed: Sun Oct 9 15:07:03 2016 +0600

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java  | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0b7071/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index a9dbb41..3d6d461 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -46,6 +46,7 @@ import org.apache.htrace.core.Tracer;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
+@Deprecated
 public class WritableRpcEngine implements RpcEngine {
   private static final Log LOG = LogFactory.getLog(RPC.class);
   
@@ -331,6 +332,7 @@ public class WritableRpcEngine implements RpcEngine {
 
 
   /** An RPC Server. */
+  @Deprecated
   public static class Server extends RPC.Server {
     /** 
      * Construct an RPC server.
@@ -443,7 +445,8 @@ public class WritableRpcEngine implements RpcEngine {
         value = value.substring(0, 55)+"...";
       LOG.info(value);
     }
-    
+
+    @Deprecated
     static class WritableRpcInvoker implements RpcInvoker {
 
      @Override
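
The practical effect of the deprecation is that new protocols are expected to use the protobuf engine. A hedged sketch of selecting it explicitly; MyProtocolPB is a placeholder for a real protobuf-generated protocol interface:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.ipc.ProtobufRpcEngine;
  import org.apache.hadoop.ipc.RPC;

  class RpcEngineSelectionSketch {
    // Placeholder protocol interface, named here only for illustration.
    interface MyProtocolPB { }

    static void configure(Configuration conf) {
      RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine.class);
    }
  }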



[40/51] [abbrv] hadoop git commit: HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. Contributed by Mingliang Liu.

Posted by ae...@apache.org.
HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/901eca00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/901eca00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/901eca00

Branch: refs/heads/HDFS-7240
Commit: 901eca004d0e7e413b109a93128892176c808d61
Parents: 12d739a
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Oct 13 14:29:30 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Oct 13 14:29:30 2016 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/ExtendedAttributes.md          | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/901eca00/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
index 5a20986..eb527ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
@@ -30,7 +30,7 @@ Overview
 
 ### HDFS extended attributes
 
-Extended attributes in HDFS are modeled after extended attributes in Linux (see the Linux manpage for [attr(5)](http://www.bestbits.at/acl/man/man5/attr.txt) and [related documentation](http://www.bestbits.at/acl/)). An extended attribute is a *name-value pair*, with a string name and binary value. Xattrs names must also be prefixed with a *namespace*. For example, an xattr named *myXattr* in the *user* namespace would be specified as **user.myXattr**. Multiple xattrs can be associated with a single inode.
+Extended attributes in HDFS are modeled after extended attributes in Linux (see the Linux manpage for [attr(5)](http://man7.org/linux/man-pages/man5/attr.5.html)). An extended attribute is a *name-value pair*, with a string name and binary value. Xattrs names must also be prefixed with a *namespace*. For example, an xattr named *myXattr* in the *user* namespace would be specified as **user.myXattr**. Multiple xattrs can be associated with a single inode.
 
 ### Namespaces and Permissions
 
@@ -49,7 +49,7 @@ The `raw` namespace is reserved for internal system attributes that sometimes ne
 Interacting with extended attributes
 ------------------------------------
 
-The Hadoop shell has support for interacting with extended attributes via `hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled after the Linux [getfattr(1)](http://www.bestbits.at/acl/man/man1/getfattr.txt) and [setfattr(1)](http://www.bestbits.at/acl/man/man1/setfattr.txt) commands.
+The Hadoop shell has support for interacting with extended attributes via `hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled after the Linux [getfattr(1)](http://man7.org/linux/man-pages/man1/getfattr.1.html) and [setfattr(1)](http://man7.org/linux/man-pages/man1/setfattr.1.html) commands.
 
 ### getfattr
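
For reference, the shell commands documented above map onto the public FileSystem xattr API. The following is a minimal, hypothetical Java sketch (the path and attribute name are made up for illustration, and the calls assume a client configuration that points at a running HDFS):

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/xattr-demo.txt");   // hypothetical path

    if (!fs.exists(file)) {
      fs.create(file).close();   // xattrs need an existing inode
    }

    // Set an xattr in the "user" namespace, mirroring
    // `hadoop fs -setfattr -n user.myXattr -v someValue /tmp/xattr-demo.txt`.
    fs.setXAttr(file, "user.myXattr",
        "someValue".getBytes(StandardCharsets.UTF_8));

    // Read it back, mirroring `hadoop fs -getfattr -n user.myXattr ...`.
    byte[] value = fs.getXAttr(file, "user.myXattr");
    System.out.println("user.myXattr = "
        + new String(value, StandardCharsets.UTF_8));

    // List all xattrs visible to this user.
    Map<String, byte[]> all = fs.getXAttrs(file);
    System.out.println("xattr count: " + all.size());
  }
}

Values are stored as raw bytes, so the caller picks the encoding.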
 




[20/51] [abbrv] hadoop git commit: HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula

Posted by ae...@apache.org.
HDFS-10988. Refactor TestBalancerBandwidth. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9638186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9638186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9638186

Branch: refs/heads/HDFS-7240
Commit: b963818621c200160bb37624f177bdcb059de4eb
Parents: 65912e4
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Oct 10 13:19:17 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 13:19:17 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestBalancerBandwidth.java      | 57 +++++++++-----------
 1 file changed, 25 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9638186/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 6e6bbee..6bbe3a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -24,13 +24,15 @@ import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 /**
@@ -54,9 +56,8 @@ public class TestBalancerBandwidth {
         DEFAULT_BANDWIDTH);
 
     /* Create and start cluster */
-    MiniDFSCluster cluster = 
-      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
-    try {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_OF_DATANODES).build()) {
       cluster.waitActive();
 
       DistributedFileSystem fs = cluster.getFileSystem();
@@ -65,12 +66,6 @@ public class TestBalancerBandwidth {
       // Ensure value from the configuration is reflected in the datanodes.
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
-      ClientDatanodeProtocol dn1Proxy = DFSUtilClient
-          .createClientDatanodeProtocolProxy(datanodes.get(0).getDatanodeId(),
-              conf, 60000, false);
-      ClientDatanodeProtocol dn2Proxy = DFSUtilClient
-          .createClientDatanodeProtocolProxy(datanodes.get(1).getDatanodeId(),
-              conf, 60000, false);
       DFSAdmin admin = new DFSAdmin(conf);
       String dn1Address = datanodes.get(0).ipcServer.getListenerAddress()
           .getHostName() + ":" + datanodes.get(0).getIpcPort();
@@ -79,51 +74,49 @@ public class TestBalancerBandwidth {
 
       // verifies the dfsadmin command execution
       String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, DEFAULT_BANDWIDTH);
+      runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, DEFAULT_BANDWIDTH);
+      runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
 
       // Dynamically change balancer bandwidth and ensure the updated value
       // is reflected on the datanodes.
       long newBandwidth = 12 * DEFAULT_BANDWIDTH; // 12M bps
       fs.setBalancerBandwidth(newBandwidth);
+      verifyBalancerBandwidth(datanodes, newBandwidth);
 
-      // Give it a few seconds to propogate new the value to the datanodes.
-      try {
-        Thread.sleep(5000);
-      } catch (Exception e) {}
-
-      assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
-      assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
       // verifies the dfsadmin command execution
       args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
 
       // Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
       // datanodes should remain as it was.
       fs.setBalancerBandwidth(0);
 
-      // Give it a few seconds to propogate new the value to the datanodes.
-      try {
-        Thread.sleep(5000);
-      } catch (Exception e) {}
+      verifyBalancerBandwidth(datanodes, newBandwidth);
 
-      assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
-      assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
       // verifies the dfsadmin command execution
       args = new String[] { "-getBalancerBandwidth", dn1Address };
-      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
       args = new String[] { "-getBalancerBandwidth", dn2Address };
-      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
-    } finally {
-      cluster.shutdown();
+      runGetBalancerBandwidthCmd(admin, args, newBandwidth);
     }
   }
 
+  private void verifyBalancerBandwidth(final ArrayList<DataNode> datanodes,
+      final long newBandwidth) throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return (long) datanodes.get(0).getBalancerBandwidth() == newBandwidth
+            && (long) datanodes.get(1).getBalancerBandwidth() == newBandwidth;
+      }
+    }, 100, 60 * 1000);
+  }
+
   private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
-      ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
+      long expectedBandwidth) throws Exception {
     PrintStream initialStdOut = System.out;
     outContent.reset();
     try {
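
The refactor above replaces fixed Thread.sleep() waits with GenericTestUtils.waitFor(), which polls a condition until it holds or a timeout expires. A generalized, hypothetical sketch of that idiom (the polling intervals mirror the patch, but this helper class itself is illustrative only):

import java.util.List;
import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;

public final class WaitForBandwidth {
  private WaitForBandwidth() {
  }

  /** Poll every 100 ms, up to 60 s, until every datanode reports the value. */
  public static void await(final List<DataNode> datanodes,
      final long expectedBandwidth)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        for (DataNode dn : datanodes) {
          if ((long) dn.getBalancerBandwidth() != expectedBandwidth) {
            return false;   // not propagated yet; keep polling
          }
        }
        return true;
      }
    }, 100, 60 * 1000);
  }
}

Polling bounds flakiness the fixed sleep could not: the test returns as soon as the new bandwidth is visible on every datanode, and it only fails after the full 60-second deadline.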




[41/51] [abbrv] hadoop git commit: HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.

Posted by ae...@apache.org.
HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9454dc5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9454dc5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9454dc5e

Branch: refs/heads/HDFS-7240
Commit: 9454dc5e8091354cd0a4b8c8aa5f4004529db5d5
Parents: 901eca0
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Oct 13 08:47:15 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Oct 13 08:47:15 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9454dc5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 4887e35..4247a67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -332,7 +332,7 @@ public class NamenodeWebHdfsMethods {
     } else {
       //generate a token
       final Token<? extends TokenIdentifier> t = generateDelegationToken(
-          namenode, ugi, userPrincipal.getName());
+          namenode, ugi, null);
       delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
     }
     final String query = op.toQueryString() + delegationQuery




[37/51] [abbrv] hadoop git commit: YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)

Posted by ae...@apache.org.
YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6476934a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6476934a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6476934a

Branch: refs/heads/HDFS-7240
Commit: 6476934ae5de1be7988ab198b673d82fe0f006e3
Parents: 6378845
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Tue Oct 11 22:07:10 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Tue Oct 11 22:07:10 2016 -0700

----------------------------------------------------------------------
 .../resourcemanager/EmbeddedElectorService.java |  59 +++++-
 .../resourcemanager/TestRMEmbeddedElector.java  | 191 +++++++++++++++++++
 2 files changed, 244 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
index 72327e8..88d2e10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,8 @@ import org.apache.zookeeper.data.ACL;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -54,6 +57,10 @@ public class EmbeddedElectorService extends AbstractService
 
   private byte[] localActiveNodeInfo;
   private ActiveStandbyElector elector;
+  private long zkSessionTimeout;
+  private Timer zkDisconnectTimer;
+  @VisibleForTesting
+  final Object zkDisconnectLock = new Object();
 
   EmbeddedElectorService(RMContext rmContext) {
     super(EmbeddedElectorService.class.getName());
@@ -80,7 +87,7 @@ public class EmbeddedElectorService extends AbstractService
         YarnConfiguration.DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH);
     String electionZNode = zkBasePath + "/" + clusterId;
 
-    long zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
+    zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
         YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
 
     List<ACL> zkAcls = RMZKUtils.getZKAcls(conf);
@@ -123,6 +130,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeActive() throws ServiceFailedException {
+    cancelDisconnectTimer();
+
     try {
       rmContext.getRMAdminService().transitionToActive(req);
     } catch (Exception e) {
@@ -132,6 +141,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeStandby() {
+    cancelDisconnectTimer();
+
     try {
       rmContext.getRMAdminService().transitionToStandby(req);
     } catch (Exception e) {
@@ -139,13 +150,49 @@ public class EmbeddedElectorService extends AbstractService
     }
   }
 
+  /**
+   * Stop the disconnect timer.  Any running tasks will be allowed to complete.
+   */
+  private void cancelDisconnectTimer() {
+    synchronized (zkDisconnectLock) {
+      if (zkDisconnectTimer != null) {
+        zkDisconnectTimer.cancel();
+        zkDisconnectTimer = null;
+      }
+    }
+  }
+
+  /**
+   * When the ZK client loses contact with ZK, this method will be called to
+   * allow the RM to react. Because the loss of connection can be noticed
+   * before the session timeout happens, it is undesirable to transition
+   * immediately. Instead the method starts a timer that will wait
+   * {@link YarnConfiguration#RM_ZK_TIMEOUT_MS} milliseconds before
+   * initiating the transition into standby state.
+   */
   @Override
   public void enterNeutralMode() {
-    /**
-     * Possibly due to transient connection issues. Do nothing.
-     * TODO: Might want to keep track of how long in this state and transition
-     * to standby.
-     */
+    LOG.warn("Lost contact with Zookeeper. Transitioning to standby in "
+        + zkSessionTimeout + " ms if connection is not reestablished.");
+
+    // If we've just become disconnected, start a timer.  When the time's up,
+    // we'll transition to standby.
+    synchronized (zkDisconnectLock) {
+      if (zkDisconnectTimer == null) {
+        zkDisconnectTimer = new Timer("Zookeeper disconnect timer");
+        zkDisconnectTimer.schedule(new TimerTask() {
+          @Override
+          public void run() {
+            synchronized (zkDisconnectLock) {
+              // Only run if the timer hasn't been cancelled
+              if (zkDisconnectTimer != null) {
+                becomeStandby();
+              }
+            }
+          }
+        }, zkSessionTimeout);
+      }
+    }
   }
 
   @SuppressWarnings(value = "unchecked")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 20b1c0e..bfd0b4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -28,6 +28,14 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestRMEmbeddedElector extends ClientBaseWithFixes {
   private static final Log LOG =
@@ -41,6 +49,14 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
   private Configuration conf;
   private AtomicBoolean callbackCalled;
 
+  private enum SyncTestType {
+    ACTIVE,
+    STANDBY,
+    NEUTRAL,
+    ACTIVE_TIMING,
+    STANDBY_TIMING
+  }
+
   @Before
   public void setup() throws IOException {
     conf = new YarnConfiguration();
@@ -79,6 +95,181 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
     LOG.info("Stopped RM");
   }
 
+  /**
+   * Test that neutral mode plays well with all other transitions.
+   *
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  @Test
+  public void testCallbackSynchronization()
+      throws IOException, InterruptedException {
+    testCallbackSynchronization(SyncTestType.ACTIVE);
+    testCallbackSynchronization(SyncTestType.STANDBY);
+    testCallbackSynchronization(SyncTestType.NEUTRAL);
+    testCallbackSynchronization(SyncTestType.ACTIVE_TIMING);
+    testCallbackSynchronization(SyncTestType.STANDBY_TIMING);
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with other transitions.
+   *
+   * @param type the type of test to run
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronization(SyncTestType type)
+      throws IOException, InterruptedException {
+    AdminService as = mock(AdminService.class);
+    RMContext rc = mock(RMContext.class);
+    Configuration myConf = new Configuration(conf);
+
+    myConf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 50);
+    when(rc.getRMAdminService()).thenReturn(as);
+
+    EmbeddedElectorService ees = new EmbeddedElectorService(rc);
+    ees.init(myConf);
+
+    ees.enterNeutralMode();
+
+    switch (type) {
+    case ACTIVE:
+      testCallbackSynchronizationActive(as, ees);
+      break;
+    case STANDBY:
+      testCallbackSynchronizationStandby(as, ees);
+      break;
+    case NEUTRAL:
+      testCallbackSynchronizationNeutral(as, ees);
+      break;
+    case ACTIVE_TIMING:
+      testCallbackSynchronizationTimingActive(as, ees);
+      break;
+    case STANDBY_TIMING:
+      testCallbackSynchronizationTimingStandby(as, ees);
+      break;
+    default:
+      fail("Unknown test type: " + type);
+      break;
+    }
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with an active
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationActive(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.becomeActive();
+
+    Thread.sleep(100);
+
+    verify(as).transitionToActive(any());
+    verify(as, never()).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with a standby
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationStandby(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.becomeStandby();
+
+    Thread.sleep(100);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode plays well with itself.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationNeutral(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    ees.enterNeutralMode();
+
+    Thread.sleep(100);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode does not race with an active
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationTimingActive(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    synchronized (ees.zkDisconnectLock) {
+      // Sleep while holding the lock so that the timer thread can't do
+      // anything when it runs.  Sleep until we're pretty sure the timer thread
+      // has tried to run.
+      Thread.sleep(100);
+      // While still holding the lock cancel the timer by transitioning. This
+      // simulates a race where the callback goes to cancel the timer while the
+      // timer is trying to run.
+      ees.becomeActive();
+    }
+
+    // Sleep just a little more so that the timer thread can do whatever it's
+    // going to do, hopefully nothing.
+    Thread.sleep(50);
+
+    verify(as).transitionToActive(any());
+    verify(as, never()).transitionToStandby(any());
+  }
+
+  /**
+   * Helper method to test that neutral mode does not race with a standby
+   * transition.
+   *
+   * @param as the admin service
+   * @param ees the embedded elector service
+   * @throws IOException if there's an issue transitioning
+   * @throws InterruptedException if interrupted
+   */
+  private void testCallbackSynchronizationTimingStandby(AdminService as,
+      EmbeddedElectorService ees) throws IOException, InterruptedException {
+    synchronized (ees.zkDisconnectLock) {
+      // Sleep while holding the lock so that the timer thread can't do
+      // anything when it runs.  Sleep until we're pretty sure the timer thread
+      // has tried to run.
+      Thread.sleep(100);
+      // While still holding the lock cancel the timer by transitioning. This
+      // simulates a race where the callback goes to cancel the timer while the
+      // timer is trying to run.
+      ees.becomeStandby();
+    }
+
+    // Sleep just a little more so that the timer thread can do whatever it's
+    // going to do, hopefully nothing.
+    Thread.sleep(50);
+
+    verify(as, atLeast(1)).transitionToStandby(any());
+    verify(as, atMost(1)).transitionToStandby(any());
+  }
+
   private class MockRMWithElector extends MockRM {
     private long delayMs = 0;
 




[17/51] [abbrv] hadoop git commit: HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. Contributed by Xiaobing Zhou

Posted by ae...@apache.org.
HDFS-10972. Add unit test for HDFS command 'dfsadmin -getDatanodeInfo'. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3441c746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3441c746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3441c746

Branch: refs/heads/HDFS-7240
Commit: 3441c746b5f35c46fca5a0f252c86c8357fe932e
Parents: cef61d5
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Oct 10 11:33:37 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Oct 10 11:33:37 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 124 +++++++++++++++++--
 1 file changed, 113 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3441c746/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index e71c5cc..94ecb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -30,12 +30,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -68,6 +70,10 @@ public class TestDFSAdmin {
   private DFSAdmin admin;
   private DataNode datanode;
   private NameNode namenode;
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
 
   @Before
   public void setUp() throws Exception {
@@ -77,12 +83,32 @@ public class TestDFSAdmin {
     admin = new DFSAdmin();
   }
 
+  private void redirectStream() {
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+  }
+
+  private void resetStream() {
+    out.reset();
+    err.reset();
+  }
+
   @After
   public void tearDown() throws Exception {
+    try {
+      System.out.flush();
+      System.err.flush();
+    } finally {
+      System.setOut(OLD_OUT);
+      System.setErr(OLD_ERR);
+    }
+
     if (cluster != null) {
       cluster.shutdown();
       cluster = null;
     }
+
+    resetStream();
   }
 
   private void restartCluster() throws IOException {
@@ -111,28 +137,104 @@ public class TestDFSAdmin {
       String nodeType, String address, final List<String> outs,
       final List<String> errs) throws IOException {
     ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
-    PrintStream out = new PrintStream(bufOut);
+    PrintStream outStream = new PrintStream(bufOut);
     ByteArrayOutputStream bufErr = new ByteArrayOutputStream();
-    PrintStream err = new PrintStream(bufErr);
+    PrintStream errStream = new PrintStream(bufErr);
 
     if (methodName.equals("getReconfigurableProperties")) {
-      admin.getReconfigurableProperties(nodeType, address, out, err);
+      admin.getReconfigurableProperties(
+          nodeType,
+          address,
+          outStream,
+          errStream);
     } else if (methodName.equals("getReconfigurationStatus")) {
-      admin.getReconfigurationStatus(nodeType, address, out, err);
+      admin.getReconfigurationStatus(nodeType, address, outStream, errStream);
     } else if (methodName.equals("startReconfiguration")) {
-      admin.startReconfiguration(nodeType, address, out, err);
+      admin.startReconfiguration(nodeType, address, outStream, errStream);
     }
 
-    Scanner scanner = new Scanner(bufOut.toString());
+    scanIntoList(bufOut, outs);
+    scanIntoList(bufErr, errs);
+  }
+
+  private static void scanIntoList(
+      final ByteArrayOutputStream baos,
+      final List<String> list) {
+    final Scanner scanner = new Scanner(baos.toString());
     while (scanner.hasNextLine()) {
-      outs.add(scanner.nextLine());
+      list.add(scanner.nextLine());
     }
     scanner.close();
-    scanner = new Scanner(bufErr.toString());
-    while (scanner.hasNextLine()) {
-      errs.add(scanner.nextLine());
+  }
+
+  @Test(timeout = 30000)
+  public void testGetDatanodeInfo() throws Exception {
+    redirectStream();
+    final Configuration dfsConf = new HdfsConfiguration();
+    final int numDn = 2;
+
+    /* init cluster */
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+        .numDataNodes(numDn).build()) {
+
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      /* init reused vars */
+      List<String> outs = null;
+      int ret;
+
+      /**
+       * test erroneous run
+       */
+      resetStream();
+      outs = Lists.newArrayList();
+
+      /* invoke getDatanodeInfo */
+      ret = ToolRunner.run(
+          dfsAdmin,
+          new String[] {"-getDatanodeInfo", "128.0.0.1:1234"});
+
+      /* collect outputs */
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(-1, ret);
+      assertTrue("Unexpected getDatanodeInfo stdout", outs.isEmpty());
+
+      /**
+       * test normal run
+       */
+      for (int i = 0; i < numDn; i++) {
+        resetStream();
+        final DataNode dn = miniCluster.getDataNodes().get(i);
+
+        /* invoke getDatanodeInfo */
+        final String addr = String.format(
+            "%s:%d",
+            dn.getXferAddress().getHostString(),
+            dn.getIpcPort());
+        ret = ToolRunner.run(
+            dfsAdmin,
+            new String[] {"-getDatanodeInfo", addr});
+
+        /* collect outputs */
+        outs = Lists.newArrayList();
+        scanIntoList(out, outs);
+
+        /* verify results */
+        assertEquals(0, ret);
+        assertEquals(
+            "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
+                + " Config version: core-x.y.z,hdfs-x",
+            1, outs.size());
+        assertThat(outs.get(0),
+            is(allOf(containsString("Uptime:"),
+                containsString("Software version"),
+                containsString("Config version"))));
+      }
     }
-    scanner.close();
   }
 
   @Test(timeout = 30000)
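
The new test drives dfsadmin exactly as the shell does, through ToolRunner, and inspects the captured stdout. For reference, a minimal, hypothetical sketch of invoking -getDatanodeInfo programmatically (the datanode IPC address is a placeholder):

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class GetDatanodeInfoDemo {
  public static void main(String[] args) throws Exception {
    // Run the equivalent of: hdfs dfsadmin -getDatanodeInfo <host:ipcPort>
    int ret = ToolRunner.run(new DFSAdmin(new HdfsConfiguration()),
        new String[] {"-getDatanodeInfo", "dn-host:50020"});  // placeholder address
    System.exit(ret);
  }
}

As in the test, the exit code is 0 when the datanode responds and -1 when it cannot be reached.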

