Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2017/08/25 19:04:00 UTC

[2/3] hadoop git commit: HDFS-12319. DirectoryScanner will throw IllegalStateException when Multiple BP's are present. Contributed by Brahma Reddy Battula.

HDFS-12319. DirectoryScanner will throw IllegalStateException when Multiple BP's are present. Contributed by Brahma Reddy Battula.
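For context: ReportCompiler#call() used to start its perfTimer and
throttleTimer inside the loop over the volume's block pools. Hadoop's
org.apache.hadoop.util.StopWatch throws IllegalStateException when
start() is called on a watch that is already running, so the second
block pool on a federated DataNode tripped it. A minimal sketch of the
failure mode, assuming only StopWatch's start() semantics (the class
and loop below are illustrative, not the patched code):

  import org.apache.hadoop.util.StopWatch;

  public class StopWatchRestartSketch {
    public static void main(String[] args) {
      StopWatch perfTimer = new StopWatch();
      // Two block pools, as on a DataNode in a federated cluster.
      String[] bpList = {"BP-1", "BP-2"};
      for (String bpid : bpList) {
        // First pass succeeds; the second start() finds the watch
        // already running and throws IllegalStateException.
        perfTimer.start();
        // ... per-block-pool report compilation would run here ...
      }
    }
  }

The fix below hoists both start() calls above the loop, so each timer
is started once per volume scan; a fresh ReportCompiler (and thus
fresh StopWatches) is created for every scan, so the single start()
is safe.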


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b85d38f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b85d38f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b85d38f

Branch: refs/heads/branch-2.8.2
Commit: 4b85d38f6052ed59b848f9a8f5caa5be2a1595f8
Parents: 86db22f
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Aug 25 10:41:52 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Aug 25 12:03:39 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DirectoryScanner.java  |  5 +-
 .../server/datanode/TestDirectoryScanner.java   | 66 ++++++++++++++++++--
 2 files changed, 62 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b85d38f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 64fa94e..f96386a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -815,13 +815,12 @@ public class DirectoryScanner implements Runnable {
     public ScanInfoPerBlockPool call() throws IOException {
       String[] bpList = volume.getBlockPoolList();
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
+      perfTimer.start();
+      throttleTimer.start();
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
         File bpFinalizedDir = volume.getFinalizedDir(bpid);
 
-        perfTimer.start();
-        throttleTimer.start();
-
         try {
           result.put(bpid,
               compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b85d38f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 35bd7e8..c7fa7f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -47,16 +47,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
@@ -307,15 +310,22 @@ public class TestDirectoryScanner {
          missingMemoryBlocks, mismatchBlocks, 0);
   }
 
-    private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) throws IOException {
     scanner.reconcile();
-    
+    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  }
+
+  private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) {
     assertTrue(scanner.diffs.containsKey(bpid));
     LinkedList<DirectoryScanner.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
-    
+
     assertEquals(diffsize, diff.size());
     assertEquals(totalBlocks, stats.totalBlocks);
     assertEquals(missingMetaFile, stats.missingMetaFile);
@@ -991,4 +1001,48 @@ public class TestDirectoryScanner {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDirectoryScannerInFederatedCluster() throws Exception {
+    //Create Federated cluster with two nameservices and one DN
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(1);
+      cluster.transitionToActive(3);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+      //Create one block in first nameservice
+      FileSystem fs = cluster.getFileSystem(1);
+      int bp1Files = 1;
+      writeFile(fs, bp1Files);
+      //Create two blocks in second nameservice
+      FileSystem fs2 = cluster.getFileSystem(3);
+      int bp2Files = 2;
+      writeFile(fs2, bp2Files);
+      //Call the Directory scanner
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
+      scanner.setRetainDiffs(true);
+      scanner.reconcile();
+      //Check blocks in corresponding BP
+      bpid = cluster.getNamesystem(1).getBlockPoolId();
+      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+      bpid = cluster.getNamesystem(3).getBlockPoolId();
+      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+    } finally {
+      if (scanner != null) {
+        scanner.shutdown();
+        scanner = null;
+      }
+    }
+  }
+
+  private void writeFile(FileSystem fs, int numFiles) throws IOException {
+    final String fileName = "/" + GenericTestUtils.getMethodName();
+    for (int i = 0; i < numFiles; i++) {
+      // Distinct path per file so each create adds a new block.
+      DFSTestUtil.createFile(fs, new Path(fileName + i), 1, (short) 1, 0);
+    }
+  }
 }

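A note on the NameNode indices in the new test:
MiniDFSNNTopology.simpleHAFederatedTopology(2) builds two nameservices
with two NameNodes each, and MiniDFSCluster numbers the NameNodes
sequentially (ns0 gets indices 0 and 1, ns1 gets 2 and 3).
Transitioning NNs 1 and 3 to active therefore activates one NameNode
per nameservice, and getFileSystem(1)/getNamesystem(1) and
getFileSystem(3)/getNamesystem(3) address those same two NameNodes.
Assuming a standard Surefire setup, the test can be run on its own
with something like:

  mvn -pl hadoop-hdfs-project/hadoop-hdfs test \
      -Dtest=TestDirectoryScanner#testDirectoryScannerInFederatedCluster

which exercises the multi-block-pool scan path that previously threw.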

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org