You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2019/10/13 06:52:49 UTC

[hadoop-ozone] branch HDDS-1228 created (now 1a392a9)

This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch HDDS-1228
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


      at 1a392a9  HDDS-1228. Chunk Scanner Checkpoints

This branch includes the following new commits:

     new 1a392a9  HDDS-1228. Chunk Scanner Checkpoints

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: hdfs-commits-help@hadoop.apache.org


[hadoop-ozone] 01/01: HDDS-1228. Chunk Scanner Checkpoints

Posted by el...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-1228
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 1a392a94be689bf9ce3dbcfca0f57ea3045f6903
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Tue Oct 8 20:07:08 2019 +0200

    HDDS-1228. Chunk Scanner Checkpoints
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   1 +
 .../ozone/container/common/impl/ContainerData.java |  14 ++-
 .../container/common/impl/ContainerDataYaml.java   |  16 +++
 .../ozone/container/common/impl/ContainerSet.java  |   2 +
 .../container/common/interfaces/Container.java     |   8 ++
 .../container/keyvalue/KeyValueContainer.java      |  11 ++
 .../container/ozoneimpl/ContainerController.java   |   6 +
 .../container/ozoneimpl/ContainerDataScanner.java  |  30 ++++-
 .../container/common/impl/TestContainerSet.java    |  39 ++++++
 .../ozoneimpl/TestContainerScrubberMetrics.java    | 138 ++++++++++++---------
 10 files changed, 201 insertions(+), 64 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 9817d87..867da08 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -226,6 +226,7 @@ public final class OzoneConsts {
   public static final String CHUNKS_PATH = "chunksPath";
   public static final String CONTAINER_DB_TYPE = "containerDBType";
   public static final String CHECKSUM = "checksum";
+  public static final String DATA_SCAN_TIMESTAMP = "dataScanTimestamp";
   public static final String ORIGIN_PIPELINE_ID = "originPipelineId";
   public static final String ORIGIN_NODE_ID = "originNodeId";
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 85738e2..aad7d6a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import java.io.IOException;
 import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
@@ -39,6 +40,7 @@ import org.yaml.snakeyaml.Yaml;
 import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
+import static org.apache.hadoop.ozone.OzoneConsts.DATA_SCAN_TIMESTAMP;
 import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
 import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
@@ -89,7 +91,9 @@ public abstract class ContainerData {
   private HddsVolume volume;
 
   private String checksum;
-  public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8");
+  private Long dataScanTimestamp;
+
+  public static final Charset CHARSET_ENCODING = StandardCharsets.UTF_8;
   private static final String DUMMY_CHECKSUM = new String(new byte[64],
       CHARSET_ENCODING);
 
@@ -103,6 +107,7 @@ public abstract class ContainerData {
       METADATA,
       MAX_SIZE,
       CHECKSUM,
+      DATA_SCAN_TIMESTAMP,
       ORIGIN_PIPELINE_ID,
       ORIGIN_NODE_ID));
 
@@ -506,6 +511,13 @@ public abstract class ContainerData {
     return this.checksum;
   }
 
+  public long getDataScanTimestamp() {
+    return dataScanTimestamp != null ? dataScanTimestamp : 0;
+  }
+
+  public void setDataScanTimestamp(long timestamp) {
+    this.dataScanTimestamp = timestamp;
+  }
 
   /**
    * Returns the origin pipeline Id of this container.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 1f9966c..048b2ae 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -53,6 +53,7 @@ import org.yaml.snakeyaml.introspector.Property;
 import org.yaml.snakeyaml.introspector.PropertyUtils;
 import org.yaml.snakeyaml.nodes.MappingNode;
 import org.yaml.snakeyaml.nodes.Node;
+import org.yaml.snakeyaml.nodes.NodeTuple;
 import org.yaml.snakeyaml.nodes.ScalarNode;
 import org.yaml.snakeyaml.nodes.Tag;
 import org.yaml.snakeyaml.representer.Representer;
@@ -217,6 +218,17 @@ public final class ContainerDataYaml {
       }
       return filtered;
     }
+
+    /**
+     * Omit properties with null value.
+     */
+    @Override
+    protected NodeTuple representJavaBeanProperty(
+        Object bean, Property property, Object value, Tag tag) {
+      return value == null
+          ? null
+          : super.representJavaBeanProperty(bean, property, value, tag);
+    }
   }
 
   /**
@@ -260,6 +272,10 @@ public final class ContainerDataYaml {
         Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
         kvData.setMetadata(meta);
         kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM));
+        Long timestamp = (Long) nodes.get(OzoneConsts.DATA_SCAN_TIMESTAMP);
+        if (timestamp != null) {
+          kvData.setDataScanTimestamp(timestamp);
+        }
         String state = (String) nodes.get(OzoneConsts.STATE);
         kvData
             .setState(ContainerProtos.ContainerDataProto.State.valueOf(state));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 41415eb..19f8032 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -132,6 +132,7 @@ public class ContainerSet {
 
   /**
    * Return an iterator of containers associated with the specified volume.
+   * The iterator is sorted by last data scan timestamp in increasing order.
    *
    * @param  volume the HDDS volume which should be used to filter containers
    * @return {@literal Iterator<Container<?>>}
@@ -143,6 +144,7 @@ public class ContainerSet {
     return containerMap.values().stream()
         .filter(x -> volumeUuid.equals(x.getContainerData().getVolume()
             .getStorageID()))
+        .sorted(Container.DATA_SCAN_ORDER)
         .iterator();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 7f7deaf..332cb61 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.Comparator;
 import java.util.Map;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -41,6 +42,10 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
  */
 public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
 
+  Comparator<Container<?>> DATA_SCAN_ORDER = Comparator.<Container<?>>
+      comparingLong(c -> c.getContainerData().getDataScanTimestamp())
+          .thenComparingLong(c -> c.getContainerData().getContainerID());
+
   /**
    * Creates a container.
    *
@@ -66,6 +71,9 @@ public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
   void update(Map<String, String> metaData, boolean forceUpdate)
       throws StorageContainerException;
 
+  void updateDataScanTimestamp(long timestamp)
+      throws StorageContainerException;
+
   /**
    * Get metadata about the container.
    *
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index a6e914b..de972fa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -337,6 +337,17 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
         containerData.getBlockCommitSequenceId());
   }
 
+  @Override
+  public void updateDataScanTimestamp(long timestamp)
+      throws StorageContainerException {
+    writeLock();
+    try {
+      updateContainerData(() -> containerData.setDataScanTimestamp(timestamp));
+    } finally {
+      writeUnlock();
+    }
+  }
+
   /**
    *
    * Must be invoked with the writeLock held.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 8bbdec9..7e759b2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -174,4 +174,10 @@ public class ContainerController {
     return containerSet.getContainerIterator(volume);
   }
 
+  void updateDataScanTimestamp(long containerId, long timestamp)
+      throws IOException {
+    Container container = containerSet.getContainer(containerId);
+    container.updateDataScanTimestamp(timestamp);
+  }
+
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
index 1141951..0e38a47 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import java.io.IOException;
+import java.time.Instant;
 import java.util.Iterator;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.slf4j.Logger;
@@ -95,14 +97,19 @@ public class ContainerDataScanner extends Thread {
     while (!stopping && itr.hasNext()) {
       Container c = itr.next();
       if (c.shouldScanData()) {
+        ContainerData containerData = c.getContainerData();
+        long containerId = containerData.getContainerID();
         try {
+          logScanStart(containerData);
           if (!c.scanData(throttler, canceler)) {
             metrics.incNumUnHealthyContainers();
-            controller.markContainerUnhealthy(
-                c.getContainerData().getContainerID());
+            controller.markContainerUnhealthy(containerId);
+          } else {
+            long now = System.currentTimeMillis();
+            logScanCompleted(containerData, now);
+            controller.updateDataScanTimestamp(containerId, now);
           }
         } catch (IOException ex) {
-          long containerId = c.getContainerData().getContainerID();
           LOG.warn("Unexpected exception while scanning container "
               + containerId, ex);
         } finally {
@@ -135,6 +142,23 @@ public class ContainerDataScanner extends Thread {
     }
   }
 
+  private static void logScanStart(ContainerData containerData) {
+    if (LOG.isDebugEnabled()) {
+      long scanTimestamp = containerData.getDataScanTimestamp();
+      Object lastScanTime = scanTimestamp <= 0 ? "never"
+          : ("at " + Instant.ofEpochMilli(scanTimestamp));
+      LOG.debug("Scanning container {}, last scanned {}",
+          containerData.getContainerID(), lastScanTime);
+    }
+  }
+
+  private static void logScanCompleted(ContainerData containerData, long now) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Completed scan of container {} at {}",
+          containerData.getContainerID(), Instant.ofEpochMilli(now));
+    }
+  }
+
   public synchronized void shutdown() {
     this.stopping = true;
     this.canceler.cancel("ContainerDataScanner("+volume+") is shutting down");
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index e1e7119..e37fdd6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -37,8 +37,12 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Random;
 import java.util.UUID;
+import java.util.function.Function;
 
+import static java.util.stream.Collectors.toList;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -179,6 +183,41 @@ public class TestContainerSet {
   }
 
   @Test
+  public void iteratorIsOrderedByScanTime() throws StorageContainerException {
+    HddsVolume vol = Mockito.mock(HddsVolume.class);
+    Mockito.when(vol.getStorageID()).thenReturn("uuid-1");
+    Random random = new Random();
+    ContainerSet containerSet = new ContainerSet();
+    for (int i=0; i<10; i++) {
+      KeyValueContainerData kvData = new KeyValueContainerData(i,
+          (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+          UUID.randomUUID().toString());
+      kvData.setDataScanTimestamp(Math.abs(random.nextLong()));
+      kvData.setVolume(vol);
+      kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
+      KeyValueContainer kv = new KeyValueContainer(kvData, new
+          OzoneConfiguration());
+      containerSet.addContainer(kv);
+    }
+    List<Container<?>> expectedOrder =
+        new ArrayList<>(containerSet.getContainerMap().values());
+    expectedOrder.sort(Container.DATA_SCAN_ORDER);
+
+    List<Container<?>> containers = new ArrayList<>();
+    containerSet.getContainerIterator(vol).forEachRemaining(containers::add);
+
+    if (!Objects.equals(expectedOrder, containers)) {
+      Function<Container, ?> debug = c ->
+          c.getContainerData().getContainerID()
+              + " " + c.getContainerData().getDataScanTimestamp();
+      assertEquals(
+          expectedOrder.stream().map(debug).collect(toList()),
+          containers.stream().map(debug).collect(toList())
+      );
+    }
+  }
+
+  @Test
   public void testGetContainerReport() throws IOException {
 
     ContainerSet containerSet = createContainerSet();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
index b9b1bea..fe6a929 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
@@ -23,88 +23,106 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
 
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
  * This test verifies the container scrubber metrics functionality.
  */
+@RunWith(MockitoJUnitRunner.class)
 public class TestContainerScrubberMetrics {
+
+  private final AtomicLong containerIdSeq = new AtomicLong(100);
+
+  @Mock
+  private Container<ContainerData> healthy;
+
+  @Mock
+  private Container<ContainerData> corruptMetadata;
+
+  @Mock
+  private Container<ContainerData> corruptData;
+
+  @Mock
+  private HddsVolume vol;
+
+  private ContainerScrubberConfiguration conf;
+  private ContainerController controller;
+
+  @Before
+  public void setup() {
+    conf = new OzoneConfiguration()
+        .getObject(ContainerScrubberConfiguration.class);
+    conf.setMetadataScanInterval(0);
+    conf.setDataScanInterval(0);
+    controller = mockContainerController();
+  }
+
   @Test
   public void testContainerMetaDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setMetadataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl);
-    mc.runIteration();
-
-    Assert.assertEquals(1, mc.getMetrics().getNumScanIterations());
-    Assert.assertEquals(3, mc.getMetrics().getNumContainersScanned());
-    Assert.assertEquals(1, mc.getMetrics().getNumUnHealthyContainers());
+    ContainerMetadataScanner subject =
+        new ContainerMetadataScanner(conf, controller);
+    subject.runIteration();
+
+    ContainerMetadataScrubberMetrics metrics = subject.getMetrics();
+    assertEquals(1, metrics.getNumScanIterations());
+    assertEquals(3, metrics.getNumContainersScanned());
+    assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
   @Test
   public void testContainerDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setDataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol);
-    sc.runIteration();
-
-    ContainerDataScrubberMetrics m = sc.getMetrics();
-    Assert.assertEquals(1, m.getNumScanIterations());
-    Assert.assertEquals(2, m.getNumContainersScanned());
-    Assert.assertEquals(1, m.getNumUnHealthyContainers());
+    ContainerDataScanner subject =
+        new ContainerDataScanner(conf, controller, vol);
+    subject.runIteration();
+
+    ContainerDataScrubberMetrics metrics = subject.getMetrics();
+    assertEquals(1, metrics.getNumScanIterations());
+    assertEquals(2, metrics.getNumContainersScanned());
+    assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
-  private ContainerController mockContainerController(HddsVolume vol) {
+  private ContainerController mockContainerController() {
     // healthy container
-    Container<ContainerData> c1 = Mockito.mock(Container.class);
-    Mockito.when(c1.shouldScanData()).thenReturn(true);
-    Mockito.when(c1.scanMetaData()).thenReturn(true);
-    Mockito.when(c1.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(true);
+    setupMockContainer(healthy, true, true, true);
 
     // unhealthy container (corrupt data)
-    ContainerData c2d = Mockito.mock(ContainerData.class);
-    Mockito.when(c2d.getContainerID()).thenReturn(101L);
-    Container<ContainerData> c2 = Mockito.mock(Container.class);
-    Mockito.when(c2.scanMetaData()).thenReturn(true);
-    Mockito.when(c2.shouldScanData()).thenReturn(true);
-    Mockito.when(c2.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(false);
-    Mockito.when(c2.getContainerData()).thenReturn(c2d);
+    setupMockContainer(corruptData, true, true, false);
 
     // unhealthy container (corrupt metadata)
-    ContainerData c3d = Mockito.mock(ContainerData.class);
-    Mockito.when(c3d.getContainerID()).thenReturn(102L);
-    Container<ContainerData> c3 = Mockito.mock(Container.class);
-    Mockito.when(c3.shouldScanData()).thenReturn(false);
-    Mockito.when(c3.scanMetaData()).thenReturn(false);
-    Mockito.when(c3.getContainerData()).thenReturn(c3d);
-
-    Collection<Container<?>> containers = Arrays.asList(c1, c2, c3);
-    ContainerController cntrl = Mockito.mock(ContainerController.class);
-    Mockito.when(cntrl.getContainers(vol))
-        .thenReturn(containers.iterator());
-    Mockito.when(cntrl.getContainers())
-        .thenReturn(containers.iterator());
-
-    return cntrl;
+    setupMockContainer(corruptMetadata, false, false, false);
+
+    Collection<Container<?>> containers = Arrays.asList(
+        healthy, corruptData, corruptMetadata);
+    ContainerController mock = mock(ContainerController.class);
+    when(mock.getContainers(vol)).thenReturn(containers.iterator());
+    when(mock.getContainers()).thenReturn(containers.iterator());
+
+    return mock;
+  }
+
+  private void setupMockContainer(
+      Container<ContainerData> c, boolean shouldScanData,
+      boolean scanMetaDataSuccess, boolean scanDataSuccess) {
+    ContainerData data = mock(ContainerData.class);
+    when(data.getContainerID()).thenReturn(containerIdSeq.getAndIncrement());
+    when(c.getContainerData()).thenReturn(data);
+    when(c.shouldScanData()).thenReturn(shouldScanData);
+    when(c.scanMetaData()).thenReturn(scanMetaDataSuccess);
+    when(c.scanData(any(DataTransferThrottler.class), any(Canceler.class)))
+        .thenReturn(scanDataSuccess);
   }
 
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: hdfs-commits-help@hadoop.apache.org