Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/02/12 21:42:49 UTC

[2/3] hadoop git commit: HDFS-9801. ReconfigurableBase should update the cached configuration. (Arpit Agarwal)

HDFS-9801. ReconfigurableBase should update the cached configuration. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63f51208
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63f51208
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63f51208

Branch: refs/heads/branch-2
Commit: 63f51208a8eced43e5f7799c045098bba11a68c2
Parents: f8c9c0ff
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 12 12:41:04 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 12 12:41:12 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Reconfigurable.java  |   8 +-
 .../apache/hadoop/conf/ReconfigurableBase.java  |  47 +++---
 .../apache/hadoop/conf/TestReconfiguration.java | 143 ++++++++++++++++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/server/datanode/DataNode.java   | 114 ++++++++-------
 .../datanode/TestDataNodeHotSwapVolumes.java    |  46 ++++--
 .../datanode/TestDataNodeVolumeFailure.java     |  21 ++-
 .../TestDataNodeVolumeFailureReporting.java     |   9 +-
 8 files changed, 291 insertions(+), 100 deletions(-)
----------------------------------------------------------------------
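In brief: reconfigurePropertyImpl now returns the effective value of the
property, and ReconfigurableBase writes that value back into its cached
Configuration (or unsets the property when the new value is null), so that
getConf() stays consistent after a reconfiguration. The signature change in
outline (a sketch of the before/after contract, not a diff excerpt):

    // Before: the implementation applied the change but returned nothing,
    // so the cached Configuration could drift from the applied values.
    protected abstract void reconfigurePropertyImpl(
        String property, String newVal) throws ReconfigurationException;

    // After: the implementation returns the value it actually applied,
    // which may differ from newVal; the base class caches it.
    protected abstract String reconfigurePropertyImpl(
        String property, String newVal) throws ReconfigurationException;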


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
index 466915d..c93dc31 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
@@ -36,7 +36,7 @@ public interface Reconfigurable extends Configurable {
    * If the property cannot be changed, throw a 
    * {@link ReconfigurationException}.
    */
-  public String reconfigureProperty(String property, String newVal) 
+  void reconfigureProperty(String property, String newVal)
     throws ReconfigurationException;
 
   /**
@@ -46,12 +46,10 @@ public interface Reconfigurable extends Configurable {
    * then changeConf should not throw an exception when changing
    * this property.
    */
-  public boolean isPropertyReconfigurable(String property);
+  boolean isPropertyReconfigurable(String property);
 
   /**
    * Return all the properties that can be changed at run time.
    */
-  public Collection<String> getReconfigurableProperties();
-
-
+  Collection<String> getReconfigurableProperties();
 }
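Note that this interface change is source-incompatible: reconfigureProperty no
longer returns the previous value. A minimal sketch of adapting a caller to the
new signature (the helper method, property name, and values are illustrative,
not part of this patch):

    // Sketch only: "r" is any Reconfigurable implementation.
    static String reconfigureAndReturnOld(Reconfigurable r)
        throws ReconfigurationException {
      // Capture the old value from the cached configuration first,
      // since the call below now returns void.
      String oldVal = r.getConf().get("some.prop");
      r.reconfigureProperty("some.prop", "v2");
      return oldVal;
    }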

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
index e50b85a..681ca2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
@@ -112,14 +112,14 @@ public abstract class ReconfigurableBase
     // See {@link ReconfigurationServlet#applyChanges}
     public void run() {
       LOG.info("Starting reconfiguration task.");
-      Configuration oldConf = this.parent.getConf();
-      Configuration newConf = this.parent.getNewConf();
-      Collection<PropertyChange> changes =
-          this.parent.getChangedProperties(newConf, oldConf);
+      final Configuration oldConf = parent.getConf();
+      final Configuration newConf = parent.getNewConf();
+      final Collection<PropertyChange> changes =
+          parent.getChangedProperties(newConf, oldConf);
       Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
       for (PropertyChange change : changes) {
         String errorMessage = null;
-        if (!this.parent.isPropertyReconfigurable(change.prop)) {
+        if (!parent.isPropertyReconfigurable(change.prop)) {
           LOG.info(String.format(
               "Property %s is not configurable: old value: %s, new value: %s",
               change.prop, change.oldVal, change.newVal));
@@ -130,17 +130,23 @@ public abstract class ReconfigurableBase
             + "\" to \"" + ((change.newVal == null) ? "<default>" : change.newVal)
             + "\".");
         try {
-          this.parent.reconfigurePropertyImpl(change.prop, change.newVal);
+          String effectiveValue =
+              parent.reconfigurePropertyImpl(change.prop, change.newVal);
+          if (change.newVal != null) {
+            oldConf.set(change.prop, effectiveValue);
+          } else {
+            oldConf.unset(change.prop);
+          }
         } catch (ReconfigurationException e) {
           errorMessage = e.getCause().getMessage();
         }
         results.put(change, Optional.fromNullable(errorMessage));
       }
 
-      synchronized (this.parent.reconfigLock) {
-        this.parent.endTime = Time.now();
-        this.parent.status = Collections.unmodifiableMap(results);
-        this.parent.reconfigThread = null;
+      synchronized (parent.reconfigLock) {
+        parent.endTime = Time.now();
+        parent.status = Collections.unmodifiableMap(results);
+        parent.reconfigThread = null;
       }
     }
   }
@@ -203,21 +209,19 @@ public abstract class ReconfigurableBase
    * reconfigureProperty.
    */
   @Override
-  public final String reconfigureProperty(String property, String newVal) 
+  public final void reconfigureProperty(String property, String newVal)
     throws ReconfigurationException {
     if (isPropertyReconfigurable(property)) {
       LOG.info("changing property " + property + " to " + newVal);
-      String oldVal;
       synchronized(getConf()) {
-        oldVal = getConf().get(property);
-        reconfigurePropertyImpl(property, newVal);
+        getConf().get(property);
+        String effectiveValue = reconfigurePropertyImpl(property, newVal);
         if (newVal != null) {
-          getConf().set(property, newVal);
+          getConf().set(property, effectiveValue);
         } else {
           getConf().unset(property);
         }
       }
-      return oldVal;
     } else {
       throw new ReconfigurationException(property, newVal,
                                              getConf().get(property));
@@ -251,8 +255,15 @@ public abstract class ReconfigurableBase
    * that is being changed. If this object owns other Reconfigurable objects
   * reconfigureProperty should be called recursively to make sure that
   * the configuration of these objects is updated.
+   *
+   * @param property Name of the property that is being reconfigured.
+   * @param newVal Proposed new value of the property.
+   * @return Effective new value of the property. This may be different from
+   *         newVal.
+   *
+   * @throws ReconfigurationException if there was an error applying newVal.
    */
-  protected abstract void reconfigurePropertyImpl(String property, String newVal) 
-    throws ReconfigurationException;
+  protected abstract String reconfigurePropertyImpl(
+      String property, String newVal) throws ReconfigurationException;
 
 }
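Under the new contract, reconfigurePropertyImpl returns the effective value,
which ReconfigurableBase writes back into the cached configuration (or unsets
when newVal is null). A minimal subclass sketch, mirroring the
makeReconfigurable helper added to the tests below; the class name, property
name, and trimming behavior are illustrative assumptions:

    import java.util.Arrays;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.ReconfigurableBase;
    import org.apache.hadoop.conf.ReconfigurationException;

    // Hypothetical subclass; only the three abstract methods are overridden.
    class TrimmingReconfigurable extends ReconfigurableBase {
      TrimmingReconfigurable(Configuration conf) {
        super(conf);
      }

      @Override
      protected Configuration getNewConf() {
        return new Configuration();
      }

      @Override
      public Collection<String> getReconfigurableProperties() {
        return Arrays.asList("example.prop");
      }

      @Override
      protected String reconfigurePropertyImpl(String property, String newVal)
          throws ReconfigurationException {
        // Return the effective value; the base class caches it via
        // getConf().set(property, effectiveValue). It may differ from newVal.
        return (newVal == null) ? null : newVal.trim();
      }
    }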

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
index 5f0516ae..610c08a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.conf;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -27,13 +28,13 @@ import org.junit.Test;
 import org.junit.Before;
 
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -44,6 +45,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
 
 public class TestReconfiguration {
   private Configuration conf1;
@@ -129,9 +131,10 @@ public class TestReconfiguration {
     }
 
     @Override
-    public synchronized void reconfigurePropertyImpl(
+    public synchronized String reconfigurePropertyImpl(
         String property, String newVal) throws ReconfigurationException {
       // do nothing
+      return newVal;
     }
     
     /**
@@ -354,13 +357,14 @@ public class TestReconfiguration {
     }
 
     @Override
-    public synchronized void reconfigurePropertyImpl(String property,
+    public synchronized String reconfigurePropertyImpl(String property,
         String newVal) throws ReconfigurationException {
       try {
         latch.await();
       } catch (InterruptedException e) {
         // Ignore
       }
+      return newVal;
     }
   }
 
@@ -395,9 +399,9 @@ public class TestReconfiguration {
     doReturn(false).when(dummy).isPropertyReconfigurable(eq("name2"));
     doReturn(true).when(dummy).isPropertyReconfigurable(eq("name3"));
 
-    doNothing().when(dummy)
+    doReturn("dummy").when(dummy)
         .reconfigurePropertyImpl(eq("name1"), anyString());
-    doNothing().when(dummy)
+    doReturn("dummy").when(dummy)
         .reconfigurePropertyImpl(eq("name2"), anyString());
     doThrow(new ReconfigurationException("NAME3", "NEW3", "OLD3",
         new IOException("io exception")))
@@ -474,4 +478,131 @@ public class TestReconfiguration {
       GenericTestUtils.assertExceptionContains("The server is stopped", e);
     }
   }
-}
\ No newline at end of file
+
+  /**
+   * Ensure that {@link ReconfigurableBase#reconfigureProperty} updates the
+   * parent's cached configuration on success.
+   * @throws ReconfigurationException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUpdatedOnSuccess() throws ReconfigurationException {
+    final String property = "FOO";
+    final String value1 = "value1";
+    final String value2 = "value2";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+    newConf.set(property, value2);
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    reconfigurable.reconfigureProperty(property, value2);
+    assertThat(reconfigurable.getConf().get(property), is(value2));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#startReconfigurationTask} updates
+   * its parent's cached configuration on success.
+   * @throws IOException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUpdatedOnSuccessAsync() throws ReconfigurationException,
+      TimeoutException, InterruptedException, IOException {
+    final String property = "FOO";
+    final String value1 = "value1";
+    final String value2 = "value2";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+    newConf.set(property, value2);
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    // Kick off a reconfiguration task and wait until it completes.
+    reconfigurable.startReconfigurationTask();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return reconfigurable.getReconfigurationTaskStatus().stopped();
+      }
+    }, 100, 60000);
+    assertThat(reconfigurable.getConf().get(property), is(value2));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#reconfigureProperty} unsets the
+   * property in its parent's configuration when the new value is null.
+   * @throws ReconfigurationException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUnset() throws ReconfigurationException {
+    final String property = "FOO";
+    final String value1 = "value1";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    reconfigurable.reconfigureProperty(property, null);
+    assertNull(reconfigurable.getConf().get(property));
+  }
+
+  /**
+   * Ensure that {@link ReconfigurableBase#startReconfigurationTask} unsets the
+   * property in its parent's configuration when the new value is null.
+   * @throws IOException
+   */
+  @Test (timeout=300000)
+  public void testConfIsUnsetAsync() throws ReconfigurationException,
+      IOException, TimeoutException, InterruptedException {
+    final String property = "FOO";
+    final String value1 = "value1";
+
+    final Configuration conf = new Configuration();
+    conf.set(property, value1);
+    final Configuration newConf = new Configuration();
+
+    final ReconfigurableBase reconfigurable = makeReconfigurable(
+        conf, newConf, Arrays.asList(property));
+
+    // Kick off a reconfiguration task and wait until it completes.
+    reconfigurable.startReconfigurationTask();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return reconfigurable.getReconfigurationTaskStatus().stopped();
+      }
+    }, 100, 60000);
+    assertNull(reconfigurable.getConf().get(property));
+  }
+
+  private ReconfigurableBase makeReconfigurable(
+      final Configuration oldConf, final Configuration newConf,
+      final Collection<String> reconfigurableProperties) {
+
+    return new ReconfigurableBase(oldConf) {
+      @Override
+      protected Configuration getNewConf() {
+        return newConf;
+      }
+
+      @Override
+      public Collection<String> getReconfigurableProperties() {
+        return reconfigurableProperties;
+      }
+
+      @Override
+      protected String reconfigurePropertyImpl(
+          String property, String newVal) throws ReconfigurationException {
+        return newVal;
+      }
+    };
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 20080ed..1b456c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1838,6 +1838,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9790. HDFS Balancer should exit with a proper message if upgrade is
     not finalized. (Xiaobing Zhou via Arpit Agarwal)
 
+    HDFS-9801. ReconfigurableBase should update the cached configuration.
+    (Arpit Agarwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index ecd1df5..8e05112 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -509,70 +509,80 @@ public class DataNode extends ReconfigurableBase
     return new HdfsConfiguration();
   }
 
+  /**
+   * {@inheritDoc}.
+   */
   @Override
-  public void reconfigurePropertyImpl(String property, String newVal)
+  public String reconfigurePropertyImpl(String property, String newVal)
       throws ReconfigurationException {
-    if (property.equals(DFS_DATANODE_DATA_DIR_KEY)) {
-      IOException rootException = null;
-      try {
-        LOG.info("Reconfiguring " + property + " to " + newVal);
-        this.refreshVolumes(newVal);
-      } catch (IOException e) {
-        rootException = e;
-      } finally {
-        // Send a full block report to let NN acknowledge the volume changes.
+    switch (property) {
+      case DFS_DATANODE_DATA_DIR_KEY: {
+        IOException rootException = null;
         try {
-          triggerBlockReport(
-              new BlockReportOptions.Factory().setIncremental(false).build());
+          LOG.info("Reconfiguring " + property + " to " + newVal);
+          this.refreshVolumes(newVal);
+          return conf.get(DFS_DATANODE_DATA_DIR_KEY);
         } catch (IOException e) {
-          LOG.warn("Exception while sending the block report after refreshing"
-              + " volumes " + property + " to " + newVal, e);
-          if (rootException == null) {
-            rootException = e;
-          }
+          rootException = e;
         } finally {
-          if (rootException != null) {
-            throw new ReconfigurationException(property, newVal,
-                getConf().get(property), rootException);
+          // Send a full block report to let NN acknowledge the volume changes.
+          try {
+            triggerBlockReport(
+                new BlockReportOptions.Factory().setIncremental(false).build());
+          } catch (IOException e) {
+            LOG.warn("Exception while sending the block report after refreshing"
+                + " volumes " + property + " to " + newVal, e);
+            if (rootException == null) {
+              rootException = e;
+            }
+          } finally {
+            if (rootException != null) {
+              throw new ReconfigurationException(property, newVal,
+                  getConf().get(property), rootException);
+            }
           }
         }
+        break;
       }
-    } else if (property.equals(
-        DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)) {
-      ReconfigurationException rootException = null;
-      try {
-        LOG.info("Reconfiguring " + property + " to " + newVal);
-        int movers;
-        if (newVal == null) {
-          // set to default
-          movers = DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
-        } else {
-          movers = Integer.parseInt(newVal);
-          if (movers <= 0) {
-            rootException = new ReconfigurationException(
-                property,
-                newVal,
-                getConf().get(property),
-                new IllegalArgumentException(
-                    "balancer max concurrent movers must be larger than 0"));
+      case DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY: {
+        ReconfigurationException rootException = null;
+        try {
+          LOG.info("Reconfiguring " + property + " to " + newVal);
+          int movers;
+          if (newVal == null) {
+            // set to default
+            movers = DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
+          } else {
+            movers = Integer.parseInt(newVal);
+            if (movers <= 0) {
+              rootException = new ReconfigurationException(
+                  property,
+                  newVal,
+                  getConf().get(property),
+                  new IllegalArgumentException(
+                      "balancer max concurrent movers must be larger than 0"));
+            }
+          }
+          xserver.updateBalancerMaxConcurrentMovers(movers);
+          return Integer.toString(movers);
+        } catch (NumberFormatException nfe) {
+          rootException = new ReconfigurationException(
+              property, newVal, getConf().get(property), nfe);
+        } finally {
+          if (rootException != null) {
+            LOG.warn(String.format(
+                "Exception in updating balancer max concurrent movers %s to %s",
+                property, newVal), rootException);
+            throw rootException;
           }
         }
-        xserver.updateBalancerMaxConcurrentMovers(movers);
-      } catch(NumberFormatException nfe) {
-        rootException = new ReconfigurationException(
-            property, newVal, getConf().get(property), nfe);
-      } finally {
-        if (rootException != null) {
-          LOG.warn(String.format(
-              "Exception in updating balancer max concurrent movers %s to %s",
-              property, newVal), rootException);
-          throw rootException;
-        }
+        break;
       }
-    } else {
-      throw new ReconfigurationException(
-          property, newVal, getConf().get(property));
+      default:
+        break;
     }
+    throw new ReconfigurationException(
+        property, newVal, getConf().get(property));
   }
 
   /**

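Because the DataNode now returns the effective value, callers can check that
what was applied matches the cached configuration, as the test changes below
do. A short sketch of exercising the balancer-movers path from inside a test
method, assuming a DataNode instance "dn" obtained from a running
MiniDFSCluster (setup and exception declarations omitted):

    // Sketch: the DataNode validates newVal (> 0), applies it to the
    // xceiver server, and returns the value it actually applied.
    String effective = dn.reconfigurePropertyImpl(
        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, "4");
    assertEquals("4", effective);

    // Passing null falls back to the default and returns its string form.
    String defaultVal = dn.reconfigurePropertyImpl(
        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, null);

In production the same path is typically driven asynchronously, e.g. via
"hdfs dfsadmin -reconfig datanode <host:ipc_port> start", which runs the
reconfiguration task shown in ReconfigurableBase above.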
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 212d2e6..725bc6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -284,7 +284,10 @@ public class TestDataNodeHotSwapVolumes {
     }
 
     String newDataDir = newDataDirBuf.toString();
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir),
+        is(conf.get(DFS_DATANODE_DATA_DIR_KEY)));
 
     // Verify the configuration value is appropriately set.
     String[] effectiveDataDirs = conf.get(DFS_DATANODE_DATA_DIR_KEY).split(",");
@@ -447,8 +450,11 @@ public class TestDataNodeHotSwapVolumes {
     DataNode dn = cluster.getDataNodes().get(0);
     Collection<String> oldDirs = getDataDirs(dn);
     String newDirs = oldDirs.iterator().next();  // Keep the first volume.
-    dn.reconfigurePropertyImpl(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     assertFileLocksReleased(
       new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
     dn.scheduleAllBlockReport(0);
@@ -504,8 +510,11 @@ public class TestDataNodeHotSwapVolumes {
       newDirs = dir;
       break;
     }
-    dn.reconfigurePropertyImpl(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     oldDirs.remove(newDirs);
     assertFileLocksReleased(oldDirs);
 
@@ -651,8 +660,10 @@ public class TestDataNodeHotSwapVolumes {
       public void run() {
         try {
           barrier.await();
-          dn.reconfigurePropertyImpl(
-              DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+          assertThat(
+              "DN did not update its own config",
+              dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDirs),
+              is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
         } catch (ReconfigurationException |
             InterruptedException |
             BrokenBarrierException e) {
@@ -700,7 +711,10 @@ public class TestDataNodeHotSwapVolumes {
     String keepDataDir = oldDataDir.split(",")[0];
     String removeDataDir = oldDataDir.split(",")[1];
 
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
     for (int i = 0; i < cluster.getNumNameNodes(); i++) {
       String bpid = cluster.getNamesystem(i).getBlockPoolId();
       BlockPoolSliceStorage bpsStorage =
@@ -717,7 +731,10 @@ public class TestDataNodeHotSwapVolumes {
 
     // Bring the removed directory back. It only succeeds if all metadata about
     // this directory was removed in the previous step.
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
   }
 
   /** Get the FsVolume on the given basePath */
@@ -771,7 +788,10 @@ public class TestDataNodeHotSwapVolumes {
     assertEquals(used, failedVolume.getDfsUsed());
 
     DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
 
     createFile(new Path("/test2"), 32, (short)2);
     FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
@@ -805,7 +825,11 @@ public class TestDataNodeHotSwapVolumes {
 
     // Remove a data dir from datanode
     File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
-    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString()),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
 
     // We should get 1 full report
     Mockito.verify(spy, timeout(60000).times(1)).blockReport(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 90e000b..05e6da1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
@@ -322,13 +324,17 @@ public class TestDataNodeVolumeFailure {
 
     // Hot swap out the failure volume.
     String dataDirs = dn0Vol2.getPath();
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        dataDirs);
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fix failure volume dn0Vol1 and remount it back.
     DataNodeTestUtils.restoreDataDirFromFailure(dn0Vol1);
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        oldDataDirs);
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fail dn0Vol2. Now since dn0Vol1 has been fixed, DN0 has sufficient
     // resources, thus it should keep running.
@@ -352,8 +358,11 @@ public class TestDataNodeVolumeFailure {
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
 
     // Add a new volume to DN0
-    dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        oldDataDirs + "," + dn0VolNew.getAbsolutePath());
+    assertThat(
+        dn0.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+            oldDataDirs + "," + dn0VolNew.getAbsolutePath()),
+        is(dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
 
     // Fail dn0Vol1 first and hot swap it.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f51208/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index d25a8a2..b1ea5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
@@ -591,8 +593,11 @@ public class TestDataNodeVolumeFailureReporting {
       dnNewDataDirs.append(newVol.getAbsolutePath());
     }
     try {
-      dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-          dnNewDataDirs.toString());
+      assertThat(
+          dn.reconfigurePropertyImpl(
+              DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+              dnNewDataDirs.toString()),
+          is(dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)));
     } catch (ReconfigurationException e) {
       // This can be thrown if reconfiguration tries to use a failed volume.
       // We need to swallow the exception, because some of our tests want to