Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2017/12/16 02:10:44 UTC

[17/46] hadoop git commit: HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl

HDFS-11792. [READ] Test cases for ProvidedVolumeDF and ProviderBlockIteratorImpl


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55ade54b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55ade54b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55ade54b

Branch: refs/heads/trunk
Commit: 55ade54b8ed36e18f028f478381a96e7b8c6be50
Parents: 4851f06
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Wed May 31 15:17:12 2017 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Fri Dec 15 17:51:38 2017 -0800

----------------------------------------------------------------------
 .../fsdataset/impl/ProvidedVolumeImpl.java      |  6 +-
 .../fsdataset/impl/TestProvidedImpl.java        | 94 ++++++++++++++++++--
 2 files changed, 92 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ade54b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index a48e117..421b9cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -191,7 +191,11 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
 
   @Override
   long getBlockPoolUsed(String bpid) throws IOException {
-    return df.getBlockPoolUsed(bpid);
+    if (bpSlices.containsKey(bpid)) {
+      return df.getBlockPoolUsed(bpid);
+    } else {
+      throw new IOException("block pool " + bpid + " is not found");
+    }
   }
 
   @Override

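For context: the hunk above makes getBlockPoolUsed() fail fast when it is asked about a block pool this volume does not track, instead of delegating unconditionally to the configured DF. A minimal, self-contained sketch of the same fail-fast lookup pattern follows; the class and field names are hypothetical and not part of this commit:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of the fail-fast lookup introduced above.
public class BlockPoolUsageSketch {
  private final Map<String, Long> usedPerPool = new HashMap<>();

  long getBlockPoolUsed(String bpid) throws IOException {
    Long used = usedPerPool.get(bpid);
    if (used == null) {
      // Unknown or already shut-down block pool: surface an error
      // rather than returning a stale or meaningless value.
      throw new IOException("block pool " + bpid + " is not found");
    }
    return used;
  }
}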
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ade54b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index 2c119fe..4753235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -83,6 +83,7 @@ public class TestProvidedImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_LOCAL_INIT_VOLUMES = 1;
+  // Only one provided volume is supported for now.
   private static final int NUM_PROVIDED_INIT_VOLUMES = 1;
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
   private static final int NUM_PROVIDED_BLKS = 10;
@@ -208,6 +209,39 @@ public class TestProvidedImpl {
     }
   }
 
+  public static class TestProvidedVolumeDF
+      implements ProvidedVolumeDF, Configurable {
+
+    @Override
+    public void setConf(Configuration conf) {
+    }
+
+    @Override
+    public Configuration getConf() {
+      return null;
+    }
+
+    @Override
+    public long getCapacity() {
+      return Long.MAX_VALUE;
+    }
+
+    @Override
+    public long getSpaceUsed() {
+      return -1;
+    }
+
+    @Override
+    public long getBlockPoolUsed(String bpid) {
+      return -1;
+    }
+
+    @Override
+    public long getAvailable() {
+      return Long.MAX_VALUE;
+    }
+  }
+
   private static Storage.StorageDirectory createLocalStorageDirectory(
       File root, Configuration conf)
       throws SecurityException, IOException {
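TestProvidedVolumeDF above is a stub DF implementation that the test wires in through DFSConfigKeys.DFS_PROVIDER_DF_CLASS (see the setUp() hunk below). As a hedged sketch, this is how a Configurable implementation selected via setClass() is typically materialized in Hadoop; the default-class argument here is an assumption for illustration only:

// ReflectionUtils.newInstance() also invokes setConf() on Configurable
// instances, which is why the stub implements Configurable.
Class<? extends ProvidedVolumeDF> dfClass = conf.getClass(
    DFSConfigKeys.DFS_PROVIDER_DF_CLASS,
    TestProvidedVolumeDF.class,   // assumed default, for illustration only
    ProvidedVolumeDF.class);
ProvidedVolumeDF df = ReflectionUtils.newInstance(dfClass, conf);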
@@ -299,8 +333,8 @@ public class TestProvidedImpl {
   public void setUp() throws IOException {
     datanode = mock(DataNode.class);
     storage = mock(DataStorage.class);
-    this.conf = new Configuration();
-    this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
+    conf = new Configuration();
+    conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
 
     when(datanode.getConf()).thenReturn(conf);
     final DNConf dnConf = new DNConf(datanode);
@@ -312,8 +346,10 @@ public class TestProvidedImpl {
         new ShortCircuitRegistry(conf);
     when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
 
-    this.conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
+    conf.setClass(DFSConfigKeys.DFS_PROVIDER_CLASS,
         TestFileRegionProvider.class, FileRegionProvider.class);
+    conf.setClass(DFSConfigKeys.DFS_PROVIDER_DF_CLASS,
+        TestProvidedVolumeDF.class, ProvidedVolumeDF.class);
 
     blkToPathMap = new HashMap<Long, String>();
     providedVolumes = new LinkedList<FsVolumeImpl>();
@@ -333,17 +369,43 @@ public class TestProvidedImpl {
     for (String bpid : BLOCK_POOL_IDS) {
       dataset.addBlockPool(bpid, conf);
     }
+  }
+
+  @Test
+  public void testProvidedVolumeImpl() throws IOException {
 
     assertEquals(NUM_LOCAL_INIT_VOLUMES + NUM_PROVIDED_INIT_VOLUMES,
         getNumVolumes());
+    assertEquals(NUM_PROVIDED_INIT_VOLUMES, providedVolumes.size());
     assertEquals(0, dataset.getNumFailedVolumes());
-  }
 
-  @Test
-  public void testProvidedStorageID() throws IOException {
+    TestProvidedVolumeDF df = new TestProvidedVolumeDF();
+
     for (int i = 0; i < providedVolumes.size(); i++) {
+      // Check basic information about the provided volume.
       assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT,
           providedVolumes.get(i).getStorageID());
+      assertEquals(StorageType.PROVIDED,
+          providedVolumes.get(i).getStorageType());
+
+      // Check the DF stats of the volume.
+      assertEquals(df.getAvailable(), providedVolumes.get(i).getAvailable());
+      assertEquals(df.getBlockPoolUsed(BLOCK_POOL_IDS[CHOSEN_BP_ID]),
+          providedVolumes.get(i).getBlockPoolUsed(
+              BLOCK_POOL_IDS[CHOSEN_BP_ID]));
+
+      providedVolumes.get(i).shutdownBlockPool(
+          BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], null);
+      try {
+        assertEquals(df.getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]),
+            providedVolumes.get(i).getBlockPoolUsed(
+                BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));
+        // Should not be reached; the call above must throw.
+        assertTrue(false);
+      } catch (IOException e) {
+        LOG.info("Expected exception: " + e);
+      }
+
     }
   }
 
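The try/assertTrue(false)/catch block above is the classic expected-exception idiom: getBlockPoolUsed() must throw once the block pool has been shut down. A more compact equivalent, assuming org.apache.hadoop.test.LambdaTestUtils is available on this branch (the enclosing test would then need to declare throws Exception):

FsVolumeImpl vol = providedVolumes.get(i);
LambdaTestUtils.intercept(IOException.class,
    () -> vol.getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));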
@@ -385,6 +447,8 @@ public class TestProvidedImpl {
       BlockIterator iter =
           vol.newBlockIterator(BLOCK_POOL_IDS[CHOSEN_BP_ID], "temp");
       Set<Long> blockIdsUsed = new HashSet<Long>();
+
+      assertEquals(BLOCK_POOL_IDS[CHOSEN_BP_ID], iter.getBlockPoolId());
       while(!iter.atEnd()) {
         ExtendedBlock eb = iter.nextBlock();
         long blkId = eb.getBlockId();
@@ -394,10 +458,26 @@ public class TestProvidedImpl {
         blockIdsUsed.add(blkId);
       }
       assertEquals(NUM_PROVIDED_BLKS, blockIdsUsed.size());
+
+      // rewind the block iterator
+      iter.rewind();
+      while(!iter.atEnd()) {
+        ExtendedBlock eb = iter.nextBlock();
+        long blkId = eb.getBlockId();
+        // The block should have already appeared in the first scan.
+        assertTrue(blockIdsUsed.contains(blkId));
+        blockIdsUsed.remove(blkId);
+      }
+      // None of the blocks should remain in blockIdsUsed.
+      assertEquals(0, blockIdsUsed.size());
+
+      // The other (non-provided) block pool should not contain any blocks.
+      BlockIterator nonProvidedBpIter =
+          vol.newBlockIterator(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], "temp");
+      assertEquals(null, nonProvidedBpIter.nextBlock());
     }
   }
 
-
   @Test
   public void testRefresh() throws IOException {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);


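The iterator assertions added above exercise the FsVolumeSpi.BlockIterator contract: atEnd() reports exhaustion, nextBlock() returns null once the pool is exhausted, and rewind() restarts the scan from the first block. A hedged usage sketch, where vol and bpid stand in for the test fixtures:

try (FsVolumeSpi.BlockIterator iter =
         vol.newBlockIterator(bpid, "sketch")) {
  while (!iter.atEnd()) {
    ExtendedBlock eb = iter.nextBlock();
    if (eb != null) {
      System.out.println("scanned block " + eb.getBlockId());
    }
  }
  iter.rewind();   // a second scan visits exactly the same blocks
}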