Posted to common-commits@hadoop.apache.org by ra...@apache.org on 2017/08/27 07:15:28 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-14729. Upgrade JUnit 3 test cases to JUnit 4. Contributed by Ajay Kumar. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 e4bf35913 -> 4a2c50b09 (forced update)
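
The change is mechanical and repeats across every file below: drop the junit.framework.TestCase superclass, annotate each test method with @Test, replace junit.framework assertions with org.junit.Assert, and turn setUp()/tearDown() overrides into @Before/@After methods. A minimal before/after sketch of that shape, using hypothetical names (TestFoo) rather than code from the patch:

// JUnit 3 style, as removed by the patch:
//   public class TestFoo extends junit.framework.TestCase {
//     public void testBar() { assertEquals(2, 1 + 1); }  // discovered by the "test" name prefix
//   }
//
// JUnit 4 style, as introduced by the patch:
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;

public class TestFoo {                  // no TestCase superclass

  @Before
  public void setUp() {
    // per-test fixture setup; previously an @Override of TestCase.setUp()
  }

  @After
  public void tearDown() {
    // per-test fixture teardown
  }

  @Test                                 // discovery is annotation-driven, not name-driven
  public void testBar() {
    assertEquals(2, 1 + 1);             // assertions now come from org.junit.Assert
  }
}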


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
index 3289d78..88f3b69 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
@@ -18,14 +18,11 @@
 package org.apache.hadoop.util;
 
 import java.io.IOException;
+import org.junit.Test;
 
-import org.apache.hadoop.util.CacheableIPList;
-import org.apache.hadoop.util.FileBasedIPList;
+import static org.junit.Assert.*;
 
-
-import junit.framework.TestCase;
-
-public class TestCacheableIPList extends TestCase {
+public class TestCacheableIPList {
 
   /**
    * Add a bunch of subnets and IPSs to the file
@@ -37,6 +34,7 @@ public class TestCacheableIPList extends TestCase {
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testAddWithSleepForCacheTimeout() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
@@ -76,6 +74,7 @@ public class TestCacheableIPList extends TestCase {
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testRemovalWithSleepForCacheTimeout() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23",
@@ -115,6 +114,7 @@ public class TestCacheableIPList extends TestCase {
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testAddWithRefresh() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
@@ -154,6 +154,7 @@ public class TestCacheableIPList extends TestCase {
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testRemovalWithRefresh() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
index 0e79fd1..1bb595c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
@@ -22,14 +22,11 @@ import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.util.FileBasedIPList;
-import org.apache.hadoop.util.IPList;
 import org.junit.After;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
-
-public class TestFileBasedIPList extends TestCase {
+public class TestFileBasedIPList {
 
   @After
   public void tearDown() {
@@ -127,6 +124,7 @@ public class TestFileBasedIPList extends TestCase {
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testFileNotSpecified() {
 
     IPList ipl = new FileBasedIPList(null);
@@ -140,6 +138,7 @@ public class TestFileBasedIPList extends TestCase {
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testFileMissing() {
 
     IPList ipl = new FileBasedIPList("missingips.txt");
@@ -153,6 +152,7 @@ public class TestFileBasedIPList extends TestCase {
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testWithEmptyList() throws IOException {
     String[] ips = {};
 
@@ -168,6 +168,7 @@ public class TestFileBasedIPList extends TestCase {
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testForBadFIle() throws IOException {
     String[] ips = { "10.221.102/23"};
 
@@ -187,6 +188,7 @@ public class TestFileBasedIPList extends TestCase {
    * Check  for inclusion with good entries
    * Check for exclusion
    */
+  @Test
   public void testWithAWrongEntry() throws IOException {
 
     String[] ips = {"10.119.103.112", "10.221.102/23", "10.221.204.1/23"};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
index 3a4ebd5..8ba930b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.util;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.hadoop.util.FindClass;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
index 58537ad..85d649c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.util;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 
-public class TestGenericsUtil extends TestCase {
+public class TestGenericsUtil {
 
+  @Test
   public void testToArray() {
 
     //test a list of size 10
@@ -45,6 +47,7 @@ public class TestGenericsUtil extends TestCase {
     }
   }
 
+  @Test
   public void testWithEmptyList() {
     try {
       List<String> list = new ArrayList<String>();
@@ -57,6 +60,7 @@ public class TestGenericsUtil extends TestCase {
     }
   }
 
+  @Test
   public void testWithEmptyList2() {
     List<String> list = new ArrayList<String>();
     //this method should not throw IndexOutOfBoundsException
@@ -81,6 +85,7 @@ public class TestGenericsUtil extends TestCase {
     }
   }
 
+  @Test
   public void testWithGenericClass() {
 
     GenericClass<String> testSubject = new GenericClass<String>();
@@ -102,6 +107,7 @@ public class TestGenericsUtil extends TestCase {
 
   }
 
+  @Test
   public void testGenericOptionsParser() throws Exception {
      GenericOptionsParser parser = new GenericOptionsParser(
         new Configuration(), new String[] {"-jt"});
@@ -116,6 +122,7 @@ public class TestGenericsUtil extends TestCase {
             "y=z", parser.getConfiguration().get("x"));
   }
 
+  @Test
   public void testGetClass() {
 
     //test with Integer
@@ -131,6 +138,7 @@ public class TestGenericsUtil extends TestCase {
             GenericClass.class, c2);
   }
 
+  @Test
   public void testIsLog4jLogger() throws Exception {
     assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
     assertTrue("The implementation is Log4j",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
index 8f33c9d..3de0854 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
@@ -21,14 +21,15 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Random;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparator;
 
-public class TestIndexedSort extends TestCase {
+public class TestIndexedSort {
 
   public void sortAllEqual(IndexedSorter sorter) throws Exception {
     final int SAMPLE = 500;
@@ -128,6 +129,7 @@ public class TestIndexedSort extends TestCase {
   }
 
 
+  @Test
   public void testQuickSort() throws Exception {
     QuickSort sorter = new QuickSort();
     sortRandom(sorter);
@@ -158,6 +160,7 @@ public class TestIndexedSort extends TestCase {
     assertTrue(Arrays.equals(values, check));
   }
 
+  @Test
   public void testHeapSort() throws Exception {
     HeapSort sorter = new HeapSort();
     sortRandom(sorter);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
index 7589e5a..e4792dc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.util;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.Test;
 
-public class TestNativeLibraryChecker extends TestCase {
+
+public class TestNativeLibraryChecker {
   private void expectExit(String [] args) {
     try {
       // should throw exit exception

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
index 341f38d..d1d392e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
@@ -28,7 +28,8 @@ import java.util.jar.JarOutputStream;
 import java.util.zip.ZipEntry;
 
 import org.junit.Assert;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -49,8 +50,6 @@ import org.apache.hadoop.mapreduce.TaskInputOutputContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.junit.Test;
-
 /**
  * Tests the use of the
  * {@link org.apache.hadoop.mapreduce.filecache.DistributedCache} within the
@@ -66,7 +65,7 @@ import org.junit.Test;
  * This test is not fast: it uses MiniMRCluster.
  */
 @SuppressWarnings("deprecation")
-public class TestMRWithDistributedCache extends TestCase {
+public class TestMRWithDistributedCache {
   private static Path TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"));
   private static File symlinkFile = new File("distributed.first.symlink");
@@ -97,23 +96,23 @@ public class TestMRWithDistributedCache extends TestCase {
       FileSystem fs = LocalFileSystem.get(conf);
 
       // Check that 2 files and 2 archives are present
-      TestCase.assertEquals(2, localFiles.length);
-      TestCase.assertEquals(2, localArchives.length);
-      TestCase.assertEquals(2, files.length);
-      TestCase.assertEquals(2, archives.length);
+      Assert.assertEquals(2, localFiles.length);
+      Assert.assertEquals(2, localArchives.length);
+      Assert.assertEquals(2, files.length);
+      Assert.assertEquals(2, archives.length);
 
       // Check the file name
-      TestCase.assertTrue(files[0].getPath().endsWith("distributed.first"));
-      TestCase.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
+      Assert.assertTrue(files[0].getPath().endsWith("distributed.first"));
+      Assert.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
       
       // Check lengths of the files
-      TestCase.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
-      TestCase.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
+      Assert.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
+      Assert.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
 
       // Check extraction of the archive
-      TestCase.assertTrue(fs.exists(new Path(localArchives[0],
+      Assert.assertTrue(fs.exists(new Path(localArchives[0],
           "distributed.jar.inside3")));
-      TestCase.assertTrue(fs.exists(new Path(localArchives[1],
+      Assert.assertTrue(fs.exists(new Path(localArchives[1],
           "distributed.jar.inside4")));
 
       // Check the class loaders
@@ -121,18 +120,18 @@ public class TestMRWithDistributedCache extends TestCase {
       ClassLoader cl = Thread.currentThread().getContextClassLoader();
       // Both the file and the archive were added to classpath, so both
       // should be reachable via the class loader.
-      TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
-      TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
-      TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
+      Assert.assertNotNull(cl.getResource("distributed.jar.inside2"));
+      Assert.assertNotNull(cl.getResource("distributed.jar.inside3"));
+      Assert.assertNull(cl.getResource("distributed.jar.inside4"));
 
       // Check that the symlink for the renaming was created in the cwd;
-      TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
+      Assert.assertTrue("symlink distributed.first.symlink doesn't exist",
           symlinkFile.exists());
-      TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
+      Assert.assertEquals("symlink distributed.first.symlink length not 1", 1,
           symlinkFile.length());
       
       //This last one is a difference between MRv2 and MRv1
-      TestCase.assertTrue("second file should be symlinked too",
+      Assert.assertTrue("second file should be symlinked too",
           expectedAbsentSymlinkFile.exists());
     }
 
@@ -188,6 +187,7 @@ public class TestMRWithDistributedCache extends TestCase {
   }
 
   /** Tests using the local job runner. */
+  @Test
   public void testLocalJobRunner() throws Exception {
     symlinkFile.delete(); // ensure symlink is not present (e.g. if test is
                           // killed part way through)
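
One note on the assertion rewrites in this file: the patch replaces TestCase.assertEquals(...) with the qualified Assert.assertEquals(...), and it also adds the static import of org.junit.Assert, so the unqualified form would compile as well. A small, self-contained illustration of the two equivalent styles (hypothetical values, not code from the patch):

import org.junit.Assert;
import static org.junit.Assert.*;

public class AssertStyleExample {
  public static void main(String[] args) {
    // Qualified form, as used in TestMRWithDistributedCache after the patch:
    Assert.assertEquals(2, 1 + 1);
    // Equivalent unqualified form, enabled by the static import:
    assertEquals(2, 1 + 1);
    assertTrue("sum should be positive", 1 + 1 > 0);
  }
}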

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
index e15f7ab..999561a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
@@ -23,7 +23,8 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.net.URI;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -38,7 +39,7 @@ import org.apache.hadoop.io.Text;
 
 
 @SuppressWarnings("unchecked")
-public class TestFileOutputCommitter extends TestCase {
+public class TestFileOutputCommitter {
   private static Path outDir = new Path(System.getProperty("test.build.data",
       "/tmp"), "output");
 
@@ -153,14 +154,18 @@ public class TestFileOutputCommitter extends TestCase {
     validateContent(outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
+
+  @Test
   public void testRecoveryV1() throws Exception {
     testRecoveryInternal(1, 1);
   }
 
+  @Test
   public void testRecoveryV2() throws Exception {
     testRecoveryInternal(2, 2);
   }
 
+  @Test
   public void testRecoveryUpgradeV1V2() throws Exception {
     testRecoveryInternal(1, 2);
   }
@@ -203,11 +208,13 @@ public class TestFileOutputCommitter extends TestCase {
     assert(dataFileFound && indexFileFound);
   }
 
+  @Test
   public void testCommitterWithFailureV1() throws Exception {
     testCommitterWithFailureInternal(1, 1);
     testCommitterWithFailureInternal(1, 2);
   }
 
+  @Test
   public void testCommitterWithFailureV2() throws Exception {
     testCommitterWithFailureInternal(2, 1);
     testCommitterWithFailureInternal(2, 2);
@@ -256,10 +263,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV1() throws Exception {
     testCommitterWithDuplicatedCommitInternal(1);
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV2() throws Exception {
     testCommitterWithDuplicatedCommitInternal(2);
   }
@@ -340,10 +349,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterV1() throws Exception {
     testCommitterInternal(1);
   }
 
+  @Test
   public void testCommitterV2() throws Exception {
     testCommitterInternal(2);
   }
@@ -380,18 +391,22 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testMapFileOutputCommitterV1() throws Exception {
     testMapFileOutputCommitterInternal(1);
   }
 
+  @Test
   public void testMapFileOutputCommitterV2() throws Exception {
     testMapFileOutputCommitterInternal(2);
   }
 
+  @Test
   public void testMapOnlyNoOutputV1() throws Exception {
     testMapOnlyNoOutputInternal(1);
   }
 
+  @Test
   public void testMapOnlyNoOutputV2() throws Exception {
     testMapOnlyNoOutputInternal(2);
   }
@@ -456,10 +471,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(out);
   }
 
+  @Test
   public void testAbortV1() throws Exception {
     testAbortInternal(1);
   }
 
+  @Test
   public void testAbortV2() throws Exception {
     testAbortInternal(2);
   }
@@ -537,10 +554,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testFailAbortV1() throws Exception {
     testFailAbortInternal(1);
   }
 
+  @Test
   public void testFailAbortV2() throws Exception {
     testFailAbortInternal(2);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
index b6a2df0..0cc3c66 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
@@ -32,14 +32,16 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestIndexCache extends TestCase {
+public class TestIndexCache {
   private JobConf conf;
   private FileSystem fs;
   private Path p;
 
-  @Override
+  @Before
   public void setUp() throws IOException {
     conf = new JobConf();
     fs = FileSystem.getLocal(conf).getRaw();
@@ -47,6 +49,7 @@ public class TestIndexCache extends TestCase {
         "cache").makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
+  @Test
   public void testLRCPolicy() throws Exception {
     Random r = new Random();
     long seed = r.nextLong();
@@ -120,6 +123,7 @@ public class TestIndexCache extends TestCase {
     checkRecord(rec, totalsize);
   }
 
+  @Test
   public void testBadIndex() throws Exception {
     final int parts = 30;
     fs.delete(p, true);
@@ -152,6 +156,7 @@ public class TestIndexCache extends TestCase {
     }
   }
 
+  @Test
   public void testInvalidReduceNumberOrLength() throws Exception {
     fs.delete(p, true);
     conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
@@ -192,6 +197,7 @@ public class TestIndexCache extends TestCase {
     }
   }
 
+  @Test
   public void testRemoveMap() throws Exception {
     // This test case use two thread to call getIndexInformation and 
     // removeMap concurrently, in order to construct race condition.
@@ -241,7 +247,8 @@ public class TestIndexCache extends TestCase {
       assertEquals(true, cache.checkTotalMemoryUsed());
     }      
   }
-  
+
+  @Test
   public void testCreateRace() throws Exception {
     fs.delete(p, true);
     conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
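
In the hunk above, the @Override on setUp() is dropped because there is no longer a TestCase.setUp() to override; JUnit 4 instead invokes any @Before method before each @Test. A minimal, self-contained illustration of that lifecycle (hypothetical class, not part of the patch), runnable through JUnitCore:

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.JUnitCore;
import static org.junit.Assert.assertNotNull;

public class LifecycleExample {
  private Object fixture;

  @Before                        // runs before every @Test method; no @Override needed
  public void setUp() {
    fixture = new Object();      // fresh fixture per test, as TestCase.setUp() used to provide
  }

  @Test
  public void testFixtureIsInitialized() {
    assertNotNull(fixture);
  }

  public static void main(String[] args) {
    JUnitCore.main(LifecycleExample.class.getName());   // basic command-line runner
  }
}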

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
index 7d3e2ed..75893f5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
@@ -31,12 +31,15 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.junit.Test;
 
-public class TestJobEndNotifier extends TestCase {
+public class TestJobEndNotifier {
   HttpServer2 server;
   URL baseUrl;
 
@@ -99,6 +102,7 @@ public class TestJobEndNotifier extends TestCase {
     }
   }
 
+  @Before
   public void setUp() throws Exception {
     new File(System.getProperty("build.webapps", "build/webapps") + "/test"
         ).mkdirs();
@@ -118,6 +122,7 @@ public class TestJobEndNotifier extends TestCase {
     FailServlet.calledTimes = 0;
   }
 
+  @After
   public void tearDown() throws Exception {
     server.stop();
   }
@@ -125,6 +130,7 @@ public class TestJobEndNotifier extends TestCase {
   /**
    * Basic validation for localRunnerNotification.
    */
+  @Test
   public void testLocalJobRunnerUriSubstitution() throws InterruptedException {
     JobStatus jobStatus = createTestJobStatus(
         "job_20130313155005308_0001", JobStatus.SUCCEEDED);
@@ -145,6 +151,7 @@ public class TestJobEndNotifier extends TestCase {
   /**
    * Validate job.end.retry.attempts for the localJobRunner.
    */
+  @Test
   public void testLocalJobRunnerRetryCount() throws InterruptedException {
     int retryAttempts = 3;
     JobStatus jobStatus = createTestJobStatus(
@@ -161,6 +168,7 @@ public class TestJobEndNotifier extends TestCase {
    * Validate that the notification times out after reaching
    * mapreduce.job.end-notification.timeout.
    */
+  @Test
   public void testNotificationTimeout() throws InterruptedException {
     Configuration conf = new Configuration();
     // Reduce the timeout to 1 second

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index 1b533e7..fabe5f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -33,7 +33,9 @@ import java.io.IOException;
 import java.io.LineNumberReader;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.TaskReport;
@@ -43,8 +45,6 @@ import org.apache.log4j.Layout;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.WriterAppender;
-import org.junit.Before;
-import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -53,7 +53,7 @@ import org.mockito.stubbing.Answer;
  * job monitoring is correct and prints 100% for map and reduce before 
  * successful completion.
  */
-public class TestJobMonitorAndPrint extends TestCase {
+public class TestJobMonitorAndPrint {
   private Job job;
   private Configuration conf;
   private ClientProtocol clientProtocol;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index 20d8ab5..abbfcb2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -27,7 +27,10 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.junit.Assert;
 
@@ -55,7 +58,7 @@ import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 
 @SuppressWarnings("unchecked")
-public class TestFileOutputCommitter extends TestCase {
+public class TestFileOutputCommitter {
   private static final Path outDir = new Path(
       System.getProperty("test.build.data",
           System.getProperty("java.io.tmpdir")),
@@ -87,12 +90,12 @@ public class TestFileOutputCommitter extends TestCase {
     fs.delete(outDir, true);
   }
   
-  @Override
+  @Before
   public void setUp() throws IOException {
     cleanup();
   }
-  
-  @Override
+
+  @After
   public void tearDown() throws IOException {
     cleanup();
   }
@@ -195,14 +198,17 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testRecoveryV1() throws Exception {
     testRecoveryInternal(1, 1);
   }
 
+  @Test
   public void testRecoveryV2() throws Exception {
     testRecoveryInternal(2, 2);
   }
 
+  @Test
   public void testRecoveryUpgradeV1V2() throws Exception {
     testRecoveryInternal(1, 2);
   }
@@ -278,18 +284,22 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterV1() throws Exception {
     testCommitterInternal(1);
   }
 
+  @Test
   public void testCommitterV2() throws Exception {
     testCommitterInternal(2);
   }
-  
+
+  @Test
   public void testCommitterWithDuplicatedCommitV1() throws Exception {
     testCommitterWithDuplicatedCommitInternal(1);
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV2() throws Exception {
     testCommitterWithDuplicatedCommitInternal(2);
   }
@@ -336,11 +346,13 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterWithFailureV1() throws Exception {
     testCommitterWithFailureInternal(1, 1);
     testCommitterWithFailureInternal(1, 2);
   }
 
+  @Test
   public void testCommitterWithFailureV2() throws Exception {
     testCommitterWithFailureInternal(2, 1);
     testCommitterWithFailureInternal(2, 2);
@@ -390,10 +402,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterRepeatableV1() throws Exception {
     testCommitterRetryInternal(1);
   }
 
+  @Test
   public void testCommitterRepeatableV2() throws Exception {
     testCommitterRetryInternal(2);
   }
@@ -493,14 +507,17 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testMapFileOutputCommitterV1() throws Exception {
     testMapFileOutputCommitterInternal(1);
   }
-  
+
+  @Test
   public void testMapFileOutputCommitterV2() throws Exception {
     testMapFileOutputCommitterInternal(2);
   }
 
+  @Test
   public void testInvalidVersionNumber() throws IOException {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -552,10 +569,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testAbortV1() throws IOException, InterruptedException {
     testAbortInternal(1);
   }
 
+  @Test
   public void testAbortV2() throws IOException, InterruptedException {
     testAbortInternal(2);
   }
@@ -575,7 +594,7 @@ public class TestFileOutputCommitter extends TestCase {
     }
   }
 
-  
+
   private void testFailAbortInternal(int version)
       throws IOException, InterruptedException {
     Job job = Job.getInstance();
@@ -631,10 +650,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testFailAbortV1() throws Exception {
     testFailAbortInternal(1);
   }
 
+  @Test
   public void testFailAbortV2() throws Exception {
     testFailAbortInternal(2);
   }
@@ -732,10 +753,12 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testConcurrentCommitTaskWithSubDirV1() throws Exception {
     testConcurrentCommitTaskWithSubDir(1);
   }
 
+  @Test
   public void testConcurrentCommitTaskWithSubDirV2() throws Exception {
     testConcurrentCommitTaskWithSubDir(2);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
index a48fe3b..a5a8eb7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
@@ -19,7 +19,8 @@
 package org.apache.hadoop.mapreduce.lib.output;
 
 import java.io.IOException;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -28,8 +29,9 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
-public class TestFileOutputFormat extends TestCase {
+public class TestFileOutputFormat {
 
+  @Test
   public void testSetOutputPathException() throws Exception {
     Job job = Job.getInstance();
     try {
@@ -42,6 +44,7 @@ public class TestFileOutputFormat extends TestCase {
     }
   }
 
+  @Test
   public void testCheckOutputSpecsException() throws Exception {
     Job job = Job.getInstance();
     Path outDir = new Path(System.getProperty("test.build.data", "/tmp"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
index 768448f..96954d5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
@@ -38,7 +38,7 @@ import org.junit.Test;
  * This class performs unit test for Job/JobControl classes.
  *  
  */
-public class TestJobControl extends junit.framework.TestCase {
+public class TestJobControl {
 
   /**
    * This is a main function for testing JobControl class.
@@ -263,13 +263,13 @@ public class TestJobControl extends junit.framework.TestCase {
     JobConf jc = new JobConf();
     Job j = new Job(jc);
     //Just make sure no exception is thrown
-    assertNull(j.getAssignedJobID());
+    Assert.assertNull(j.getAssignedJobID());
     org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
     org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test",0);
     when(mockjob.getJobID()).thenReturn(jid);
     j.setJob(mockjob);
     JobID expected = new JobID("test",0);
-    assertEquals(expected, j.getAssignedJobID());
+    Assert.assertEquals(expected, j.getAssignedJobID());
     verify(mockjob).getJobID();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
index ecc01db..afe4a10 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
@@ -17,9 +17,6 @@
  */
 
 package org.apache.hadoop.mapreduce;
-
-import junit.framework.TestCase;
-
 import java.io.IOException;
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -27,9 +24,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.*;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
index 1428e47..194cdeb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.mapreduce.lib.input;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.List;
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -30,9 +30,10 @@ import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 
-public class TestDelegatingInputFormat extends TestCase {
+public class TestDelegatingInputFormat {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testSplitting() throws Exception {
     Job job = Job.getInstance();
     MiniDFSCluster dfs = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
index d86ddd0..da011a2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
@@ -95,7 +95,7 @@ public class TestMapReduceJobControl extends HadoopTestCase {
     cjob2 = new ControlledJob(job2, dependingJobs);
 
     Job job3 = MapReduceTestUtil.createCopyJob(conf, outdir_3, 
-	                                   outdir_1, outdir_2);
+                                     outdir_1, outdir_2);
     dependingJobs = new ArrayList<ControlledJob>();
     dependingJobs.add(cjob1);
     dependingJobs.add(cjob2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
index ae06812..14f123a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.mapreduce.lib.output;
 import java.io.*;
 import java.net.URI;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
@@ -38,7 +41,7 @@ import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 
 
-public class TestMRCJCFileOutputCommitter extends TestCase {
+public class TestMRCJCFileOutputCommitter {
   private static Path outDir = new Path(System.getProperty("test.build.data",
       "/tmp"), "output");
 
@@ -76,17 +79,18 @@ public class TestMRCJCFileOutputCommitter extends TestCase {
     fs.delete(outDir, true);
   }
   
-  @Override
+  @Before
   public void setUp() throws IOException {
     cleanup();
   }
   
-  @Override
+  @After
   public void tearDown() throws IOException {
     cleanup();
   }
   
   @SuppressWarnings("unchecked")
+  @Test
   public void testCommitter() throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -122,7 +126,8 @@ public class TestMRCJCFileOutputCommitter extends TestCase {
     assertEquals(output, expectedOutput.toString());
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
-  
+
+  @Test
   public void testEmptyOutput() throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -146,6 +151,7 @@ public class TestMRCJCFileOutputCommitter extends TestCase {
   }
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testAbort() throws IOException, InterruptedException {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -195,6 +201,7 @@ public class TestMRCJCFileOutputCommitter extends TestCase {
   }
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testFailAbort() throws IOException, InterruptedException {
     Job job = Job.getInstance();
     Configuration conf = job.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
index 508ded3..471c68f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
@@ -22,23 +22,30 @@ import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestTaskContext extends TestCase {
-  
+public class TestTaskContext {
+
+  @Test
   public void testTaskContext() {
-    TaskContext context = new TaskContext(null, null, null, null, null, null, null);
+    TaskContext context = new TaskContext(null, null, null, null, null, null,
+        null);
     
     context.setInputKeyClass(IntWritable.class);
-    assertEquals(IntWritable.class.getName(), context.getInputKeyClass().getName());
+    Assert.assertEquals(IntWritable.class.getName(), context.getInputKeyClass
+        ().getName());
  
     context.setInputValueClass(Text.class);
-    assertEquals(Text.class.getName(), context.getInputValueClass().getName()); 
+    Assert.assertEquals(Text.class.getName(), context.getInputValueClass()
+        .getName());
    
     context.setOutputKeyClass(LongWritable.class);
-    assertEquals(LongWritable.class.getName(), context.getOutputKeyClass().getName()); 
+    Assert.assertEquals(LongWritable.class.getName(), context
+        .getOutputKeyClass().getName());
 
     context.setOutputValueClass(FloatWritable.class);
-    assertEquals(FloatWritable.class.getName(), context.getOutputValueClass().getName()); 
+    Assert.assertEquals(FloatWritable.class.getName(), context
+        .getOutputValueClass().getName());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
index 7eb6467..fa68364 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.mapred.nativetask.buffer;
 
 import java.io.IOException;
 
-import junit.framework.TestCase;
-
+import org.junit.Test;
 import org.junit.Assert;
 
-public class TestInputBuffer extends TestCase {
+public class TestInputBuffer {
+
+  @Test
   public void testInputBuffer() throws IOException {
     final int size = 100;
     final InputBuffer input1 = new InputBuffer(BufferType.DIRECT_BUFFER, size);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
index 39c25a6..af6693e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.mapred.nativetask.buffer;
 
-import junit.framework.TestCase;
-
+import org.junit.Test;
 import org.junit.Assert;
 
-public class TestOutputBuffer extends TestCase {
+public class TestOutputBuffer {
+
+  @Test
   public void testOutputBuffer() {
     final int size = 100;
     final OutputBuffer output1 = new OutputBuffer(BufferType.DIRECT_BUFFER, size);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
index fd5b100..1a7dace 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.mapred.nativetask.serde;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
 
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.nativetask.Constants;
@@ -30,12 +31,11 @@ import org.apache.hadoop.mapred.nativetask.testutil.TestInput;
 import org.apache.hadoop.mapred.nativetask.testutil.TestInput.KV;
 import org.apache.hadoop.mapred.nativetask.util.SizedWritable;
 import org.junit.Assert;
-import org.junit.Before;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 
 @SuppressWarnings({ "rawtypes", "unchecked" })
-public class TestKVSerializer extends TestCase {
+public class TestKVSerializer {
 
   int inputArraySize = 1000; // 1000 bytesWriable elements
   int bufferSize = 100; // bytes
@@ -46,7 +46,6 @@ public class TestKVSerializer extends TestCase {
   private SizedWritable value;
   private KVSerializer serializer;
 
-  @Override
   @Before
   public void setUp() throws IOException {
     this.inputArray = TestInput.getMapInputs(inputArraySize);
@@ -60,6 +59,7 @@ public class TestKVSerializer extends TestCase {
     serializer.updateLength(key, value);
   }
 
+  @Test
   public void testUpdateLength() throws IOException {
     Mockito.mock(DataOutputStream.class);
 
@@ -75,6 +75,7 @@ public class TestKVSerializer extends TestCase {
     }
   }
 
+  @Test
   public void testSerializeKV() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -92,6 +93,7 @@ public class TestKVSerializer extends TestCase {
     Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH);
   }
 
+  @Test
   public void testSerializeNoFlush() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -109,6 +111,7 @@ public class TestKVSerializer extends TestCase {
     Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH);
   }
 
+  @Test
   public void testSerializePartitionKV() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -130,12 +133,14 @@ public class TestKVSerializer extends TestCase {
         + Constants.SIZEOF_PARTITION_LENGTH);
   }
 
+  @Test
   public void testDeserializerNoData() throws IOException {
     final DataInputStream in = Mockito.mock(DataInputStream.class);
     Mockito.when(in.hasUnReadData()).thenReturn(false);
     Assert.assertEquals(0, serializer.deserializeKV(in, key, value));
   }
 
+  @Test
   public void testDeserializer() throws IOException {
     final DataInputStream in = Mockito.mock(DataInputStream.class);
     Mockito.when(in.hasUnReadData()).thenReturn(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
index 6ea8092..584aedd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
@@ -17,15 +17,16 @@
  */
 package org.apache.hadoop.mapred.nativetask.utils;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import org.junit.Assert;
 
 import org.apache.hadoop.mapred.nativetask.util.ReadWriteBuffer;
-import org.junit.Assert;
 
-public class TestReadWriteBuffer extends TestCase {
+public class TestReadWriteBuffer {
 
   private static byte[] bytes = new byte[] { '0', 'a', 'b', 'c', 'd', '9' };
 
+  @Test
   public void testReadWriteBuffer() {
 
     final ReadWriteBuffer buffer = new ReadWriteBuffer();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
index 7b82eff..a6e43ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
@@ -17,15 +17,16 @@
  */
 package org.apache.hadoop.mapred.nativetask.utils;
 
-import junit.framework.TestCase;
+import org.junit.Test;
 
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.nativetask.util.SizedWritable;
 import org.junit.Assert;
 
 @SuppressWarnings({ "rawtypes", "unchecked" })
-public class TestSizedWritable extends TestCase {
+public class TestSizedWritable {
 
+  @Test
   public void testSizedWritable() {
     final SizedWritable w = new SizedWritable(BytesWritable.class);
     Assert.assertTrue(w.length == SizedWritable.INVALID_LENGTH);

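Every file touched by this patch follows the same mechanical JUnit 3 to JUnit 4
conversion shown in the hunks above. A minimal before/after sketch, with an
illustrative class name that is not part of the patch:

    // JUnit 3 style being removed:
    //   public class FooTest extends junit.framework.TestCase {
    //     @Override
    //     protected void setUp() throws Exception { /* fixture */ }
    //     public void testSomething() { assertEquals(1, 1); }
    //   }

    // JUnit 4 style being introduced:
    import org.junit.Assert;
    import org.junit.Before;
    import org.junit.Test;

    public class FooTest {              // no TestCase superclass
      @Before
      public void setUp() throws Exception {
        // fixture setup runs before each @Test method
      }

      @Test
      public void testSomething() {
        Assert.assertEquals(1, 1);      // asserts come from org.junit.Assert
      }
    }
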
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
index 3215bfe..2df2df0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
@@ -18,29 +18,35 @@
 package org.apache.hadoop.examples;
 
 import java.math.BigInteger;
+import org.junit.Test;
+import org.junit.Assert;
 
 /** Tests for BaileyBorweinPlouffe */
-public class TestBaileyBorweinPlouffe extends junit.framework.TestCase {
+public class TestBaileyBorweinPlouffe {
 
+  @Test
   public void testMod() {
     final BigInteger TWO = BigInteger.ONE.add(BigInteger.ONE);
     for(long n = 3; n < 100; n++) {
       for (long e = 1; e < 100; e++) {
         final long r = TWO.modPow(
             BigInteger.valueOf(e), BigInteger.valueOf(n)).longValue();
-        assertEquals("e=" + e + ", n=" + n, r, BaileyBorweinPlouffe.mod(e, n));
+        Assert.assertEquals("e=" + e + ", n=" + n, r, BaileyBorweinPlouffe
+            .mod(e, n));
       }
     }
   }
 
+  @Test
   public void testHexDigit() {
     final long[] answers = {0x43F6, 0xA308, 0x29B7, 0x49F1, 0x8AC8, 0x35EA};
     long d = 1;
     for(int i = 0; i < answers.length; i++) {
-      assertEquals("d=" + d, answers[i], BaileyBorweinPlouffe.hexDigits(d));
+      Assert.assertEquals("d=" + d, answers[i], BaileyBorweinPlouffe
+          .hexDigits(d));
       d *= 10;
     }
 
-    assertEquals(0x243FL, BaileyBorweinPlouffe.hexDigits(0));
+    Assert.assertEquals(0x243FL, BaileyBorweinPlouffe.hexDigits(0));
  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
index 991121f..d6f284e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
@@ -19,24 +19,30 @@ package org.apache.hadoop.examples.pi.math;
 
 import java.math.BigInteger;
 import java.util.Random;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestLongLong extends junit.framework.TestCase {
-  static final Random RAN = new Random(); 
+public class TestLongLong {
+
+  static final Random RAN = new Random();
   static final long MASK = (1L << (LongLong.SIZE >> 1)) - 1;
 
   static long nextPositiveLong() {
     return RAN.nextLong() & MASK;
   }
-  
+
   static void verifyMultiplication(long a, long b) {
     final LongLong ll = LongLong.multiplication(new LongLong(), a, b);
     final BigInteger bi = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));
 
-    final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
+    final String s = String.format(
+        "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+        b);
     //System.out.println(s);
-    assertEquals(s, bi, ll.toBigInteger());
+    Assert.assertEquals(s, bi, ll.toBigInteger());
   }
 
+  @Test
   public void testMultiplication() {
     for(int i = 0; i < 100; i++) {
       final long a = nextPositiveLong();
@@ -50,19 +56,24 @@ public class TestLongLong extends junit.framework.TestCase {
   static void verifyRightShift(long a, long b) {
     final LongLong ll = new LongLong().set(a, b);
     final BigInteger bi = ll.toBigInteger();
-    
-    for(int i = 0; i < LongLong.SIZE >> 1; i++) {
+
+    for (int i = 0; i < LongLong.SIZE >> 1; i++) {
       final long result = ll.shiftRight(i) & MASK;
       final long expected = bi.shiftRight(i).longValue() & MASK;
-      final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
-      assertEquals(s, expected, result);
+      final String s = String.format(
+          "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+          b);
+      Assert.assertEquals(s, expected, result);
     }
 
-    final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
+    final String s = String.format(
+        "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+        b);
     //System.out.println(s);
-    assertEquals(s, bi, ll.toBigInteger());
+    Assert.assertEquals(s, bi, ll.toBigInteger());
   }
 
+  @Test
   public void testRightShift() {
     for(int i = 0; i < 1000; i++) {
       final long a = nextPositiveLong();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
index 079eb7f..a75ec29 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
@@ -21,14 +21,16 @@ import java.math.BigInteger;
 import java.util.Random;
 
 import org.apache.hadoop.examples.pi.Util.Timer;
+import org.junit.Assert;
+import org.junit.Test;
 
-public class TestModular extends junit.framework.TestCase { 
-  private static final Random RANDOM = new Random(); 
+public class TestModular {
+  private static final Random RANDOM = new Random();
   private static final BigInteger TWO = BigInteger.valueOf(2);
 
 
   static final int DIV_VALID_BIT = 32;
-  static final long DIV_LIMIT = 1L << DIV_VALID_BIT; 
+  static final long DIV_LIMIT = 1L << DIV_VALID_BIT;
 
   // return r/n for n > r > 0
   static long div(long sum, long r, long n) {
@@ -36,7 +38,7 @@ public class TestModular extends junit.framework.TestCase {
     int i = DIV_VALID_BIT - 1;
     for(r <<= 1; r < n; r <<= 1) i--;
 //System.out.printf("  r=%d, n=%d, q=%d\n", r, n, q);
-    
+
     for(; i >= 0 ;) {
       r -= n;
       q |= (1L << i);
@@ -48,14 +50,15 @@ public class TestModular extends junit.framework.TestCase {
     sum += q;
     return sum < DIV_LIMIT? sum: sum - DIV_LIMIT;
   }
- 
+
+  @Test
   public void testDiv() {
     for(long n = 2; n < 100; n++)
       for(long r = 1; r < n; r++) {
         final long a = div(0, r, n);
         final long b = (long)((r*1.0/n) * (1L << DIV_VALID_BIT));
         final String s = String.format("r=%d, n=%d, a=%X, b=%X", r, n, a, b);
-        assertEquals(s, b, a);
+        Assert.assertEquals(s, b, a);
       }
   }
 
@@ -64,16 +67,16 @@ public class TestModular extends junit.framework.TestCase {
 
     for(int i = 0; i < rn.length; i++) {
       rn[i] = new long[rsize + 1][];
-      long n = RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL; 
+      long n = RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL;
       if (n <= 1) n = 0xFFFFFFFFFFFFFFFL - n;
       rn[i][0] = new long[]{n};
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
 
       for(int j = 1; j < rn[i].length; j++) {
         long r = RANDOM.nextLong();
         if (r < 0) r = -r;
         if (r >= n) r %= n;
-        final BigInteger R = BigInteger.valueOf(r); 
+        final BigInteger R = BigInteger.valueOf(r);
         rn[i][j] = new long[]{r, R.multiply(R).mod(N).longValue()};
       }
     }
@@ -102,20 +105,20 @@ public class TestModular extends junit.framework.TestCase {
     } else {
       final int HALF = (63 - Long.numberOfLeadingZeros(n)) >> 1;
       final int FULL = HALF << 1;
-      final long ONES = (1 << HALF) - 1; 
-  
+      final long ONES = (1 << HALF) - 1;
+
       final long high = r >>> HALF;
       final long low  = r &= ONES;
 
       r *= r;
       if (r >= n) r %= n;
-  
+
       if (high != 0) {
         long s = high * high;
         if (s >= n) s %= n;
         for(int i = 0; i < FULL; i++)
           if ((s <<= 1) >= n) s -= n;
-        
+
         if (low == 0)
           r = s;
         else {
@@ -123,7 +126,7 @@ public class TestModular extends junit.framework.TestCase {
           if (t >= n) t %= n;
           for(int i = -1; i < HALF; i++)
             if ((t <<= 1) >= n) t -= n;
-          
+
           r += s;
           if (r >= n) r -= n;
           r += t;
@@ -133,7 +136,7 @@ public class TestModular extends junit.framework.TestCase {
     }
     return r;
   }
-  
+
   static void squareBenchmarks() {
     final Timer t = new Timer(false);
     t.tick("squareBenchmarks(), MAX_SQRT=" + Modular.MAX_SQRT_LONG);
@@ -147,8 +150,11 @@ public class TestModular extends junit.framework.TestCase {
         final long r = rn[i][j][0];
         final long answer = rn[i][j][1];
         final long s = square_slow(r, n);
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("square_slow");
@@ -161,8 +167,11 @@ public class TestModular extends junit.framework.TestCase {
         final long r = rn[i][j][0];
         final long answer = rn[i][j][1];
         final long s = square(r, n, r2p64);
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("square");
@@ -175,8 +184,11 @@ public class TestModular extends junit.framework.TestCase {
         final long answer = rn[i][j][1];
         final BigInteger R = BigInteger.valueOf(r);
         final long s = R.multiply(R).mod(N).longValue();
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("R.multiply(R).mod(N)");
@@ -189,8 +201,11 @@ public class TestModular extends junit.framework.TestCase {
         final long answer = rn[i][j][1];
         final BigInteger R = BigInteger.valueOf(r);
         final long s = R.modPow(TWO, N).longValue();
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("R.modPow(TWO, N)");
@@ -201,15 +216,15 @@ public class TestModular extends junit.framework.TestCase {
 
     for(int i = 0; i < en.length; i++) {
       en[i] = new long[esize + 1][];
-      long n = (RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL) | 1L; 
+      long n = (RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL) | 1L;
       if (n == 1) n = 3;
       en[i][0] = new long[]{n};
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
 
       for(int j = 1; j < en[i].length; j++) {
         long e = RANDOM.nextLong();
         if (e < 0) e = -e;
-        final BigInteger E = BigInteger.valueOf(e); 
+        final BigInteger E = BigInteger.valueOf(e);
         en[i][j] = new long[]{e, TWO.modPow(E, N).longValue()};
       }
     }
@@ -253,10 +268,10 @@ public class TestModular extends junit.framework.TestCase {
   static class Montgomery2 extends Montgomery {
     /** Compute 2^y mod N for N odd. */
     long mod2(final long y) {
-      long r0 = R - N; 
+      long r0 = R - N;
       long r1 = r0 << 1;
       if (r1 >= N) r1 -= N;
-      
+
       for(long mask = Long.highestOneBit(y); mask > 0; mask >>>= 1) {
         if ((mask & y) == 0) {
           r1 = product.m(r0, r1);
@@ -269,7 +284,7 @@ public class TestModular extends junit.framework.TestCase {
       return product.m(r0, 1);
     }
   }
-  
+
   static void modBenchmarks() {
     final Timer t = new Timer(false);
     t.tick("modBenchmarks()");
@@ -283,12 +298,15 @@ public class TestModular extends junit.framework.TestCase {
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = Modular.mod(e, n);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("Modular.mod");
-    
+
     final Montgomery2 m2 = new Montgomery2();
     for(int i = 0; i < en.length; i++) {
       final long n = en[i][0][0];
@@ -297,8 +315,11 @@ public class TestModular extends junit.framework.TestCase {
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = m2.mod(e);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("montgomery.mod");
@@ -310,21 +331,27 @@ public class TestModular extends junit.framework.TestCase {
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = m2.mod2(e);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("montgomery.mod2");
 
     for(int i = 0; i < en.length; i++) {
       final long n = en[i][0][0];
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
       for(int j = 1; j < en[i].length; j++) {
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = TWO.modPow(BigInteger.valueOf(e), N).longValue();
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("BigInteger.modPow(e, n)");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
index e80b9bb..2741962 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
@@ -28,14 +28,19 @@ import org.apache.hadoop.examples.pi.Container;
 import org.apache.hadoop.examples.pi.Util;
 import org.apache.hadoop.examples.pi.Util.Timer;
 import org.apache.hadoop.examples.pi.math.TestModular.Montgomery2;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestSummation extends junit.framework.TestCase {
+public class TestSummation {
   static final Random RANDOM = new Random();
   static final BigInteger TWO = BigInteger.valueOf(2);
+  private static final double DOUBLE_DELTA = 0.000000001f;
 
   private static Summation2 newSummation(final long base, final long range, final long delta) {
-    final ArithmeticProgression N = new ArithmeticProgression('n', base+3, delta, base+3+range);
-    final ArithmeticProgression E = new ArithmeticProgression('e', base+range, -delta, base);
+    final ArithmeticProgression N = new ArithmeticProgression('n', base + 3,
+        delta, base + 3 + range);
+    final ArithmeticProgression E = new ArithmeticProgression('e', base + range,
+        -delta, base);
     return new Summation2(N, E);
   }
 
@@ -53,10 +58,11 @@ public class TestSummation extends junit.framework.TestCase {
 
     final List<Summation> combined = Util.combine(a);
 //    Util.out.println("combined=" + combined);
-    assertEquals(1, combined.size());
-    assertEquals(sigma, combined.get(0));
+    Assert.assertEquals(1, combined.size());
+    Assert.assertEquals(sigma, combined.get(0));
   }
 
+  @Test
   public void testSubtract() {
     final Summation sigma = newSummation(3, 10000, 20);
     final int size = 10;
@@ -112,7 +118,9 @@ public class TestSummation extends junit.framework.TestCase {
       long n = N.value;
       double s = 0;
       for(; e > E.limit; e += E.delta) {
-        s = Modular.addMod(s, TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n)).doubleValue()/n);
+        s = Modular.addMod(s,
+            TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n))
+                .doubleValue() / n);
         n += N.delta;
       }
       return s;
@@ -124,16 +132,16 @@ public class TestSummation extends junit.framework.TestCase {
     t.tick("sigma=" + sigma);
     final double value = sigma.compute();
     t.tick("compute=" + value);
-    assertEquals(value, sigma.compute_modular());
+    Assert.assertEquals(value, sigma.compute_modular(), DOUBLE_DELTA);
     t.tick("compute_modular");
-    assertEquals(value, sigma.compute_montgomery());
+    Assert.assertEquals(value, sigma.compute_montgomery(), DOUBLE_DELTA);
     t.tick("compute_montgomery");
-    assertEquals(value, sigma.compute_montgomery2());
+    Assert.assertEquals(value, sigma.compute_montgomery2(), DOUBLE_DELTA);
     t.tick("compute_montgomery2");
 
-    assertEquals(value, sigma.compute_modBigInteger());
+    Assert.assertEquals(value, sigma.compute_modBigInteger(), DOUBLE_DELTA);
     t.tick("compute_modBigInteger");
-    assertEquals(value, sigma.compute_modPow());
+    Assert.assertEquals(value, sigma.compute_modPow(), DOUBLE_DELTA);
     t.tick("compute_modPow");
   }
 

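The DOUBLE_DELTA constant added above is needed because JUnit 4 deprecates
Assert.assertEquals(double, double) and expects an explicit tolerance as a
third argument. A small illustration of the pattern (the values are made up,
not taken from the patch):

    import org.junit.Assert;
    import org.junit.Test;

    public class DeltaSketch {
      private static final double DELTA = 0.000000001;

      @Test
      public void testDoubleComparison() {
        double expected = 0.1 + 0.2;
        // Assert.assertEquals(expected, 0.3) is deprecated for doubles;
        // pass a tolerance so tiny floating-point error is accepted.
        Assert.assertEquals(expected, 0.3, DELTA);
      }
    }
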
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
index dbb8ef0..2daae2e 100644
--- a/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
+++ b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.contrib.utils.join;
 
 import java.io.IOException;
 
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-import junit.extensions.TestSetup;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -36,24 +36,27 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.*;
 
-public class TestDataJoin extends TestCase {
-
+/**
+ * Class to test joins between two data
+ * sources.
+ */
+public class TestDataJoin {
   private static MiniDFSCluster cluster = null;
-  public static Test suite() {
-    TestSetup setup = new TestSetup(new TestSuite(TestDataJoin.class)) {
-      protected void setUp() throws Exception {
-        Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      }
-      protected void tearDown() throws Exception {
-        if (cluster != null) {
-          cluster.shutdown();
-        }
-      }
-    };
-    return setup;
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
+  @Test
   public void testDataJoin() throws Exception {
     final int srcs = 4;
     JobConf job = new JobConf();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
index 3704c5b..3e52b3c 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
@@ -38,11 +38,13 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
 import org.slf4j.event.Level;
 
 import static org.slf4j.LoggerFactory.getLogger;
 
-public class TestDistCh extends junit.framework.TestCase {
+public class TestDistCh {
   {
     GenericTestUtils.setLogLevel(
         getLogger("org.apache.hadoop.hdfs.StateChange"), Level.ERROR);
@@ -75,20 +77,20 @@ public class TestDistCh extends junit.framework.TestCase {
 
     Path createSmallFile(Path dir) throws IOException {
       final Path f = new Path(dir, "f" + ++fcount);
-      assertTrue(!fs.exists(f));
+      Assert.assertTrue(!fs.exists(f));
       final DataOutputStream out = fs.create(f);
       try {
         out.writeBytes("createSmallFile: f=" + f);
       } finally {
         out.close();
       }
-      assertTrue(fs.exists(f));
+      Assert.assertTrue(fs.exists(f));
       return f;
     }
 
     Path mkdir(Path dir) throws IOException {
-      assertTrue(fs.mkdirs(dir));
-      assertTrue(fs.getFileStatus(dir).isDirectory());
+      Assert.assertTrue(fs.mkdirs(dir));
+      Assert.assertTrue(fs.getFileStatus(dir).isDirectory());
       return dir;
     }
     
@@ -127,7 +129,8 @@ public class TestDistCh extends junit.framework.TestCase {
       defaultPerm = permission == null || "".equals(permission);
     }
   }
-  
+
+  @Test
   public void testDistCh() throws Exception {
     final Configuration conf = new Configuration();
 
@@ -190,13 +193,13 @@ public class TestDistCh extends junit.framework.TestCase {
   }
 
   static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
-    assertEquals(expected.getUserName(), actual.getOwner());
-    assertEquals(expected.getGroupName(), actual.getGroup());
+    Assert.assertEquals(expected.getUserName(), actual.getOwner());
+    Assert.assertEquals(expected.getGroupName(), actual.getGroup());
     FsPermission perm = expected.getPermission();
     if (actual.isFile() && expected.defaultPerm) {
       perm = perm.applyUMask(UMASK);
     }
-    assertEquals(perm, actual.getPermission());
+    Assert.assertEquals(perm, actual.getPermission());
   }
 
   private static String runLsr(final FsShell shell, String root, int returnvalue
@@ -210,7 +213,7 @@ public class TestDistCh extends junit.framework.TestCase {
     System.setErr(out);
     final String results;
     try {
-      assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
+      Assert.assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
       results = bytes.toString();
     } finally {
       IOUtils.closeStream(out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
index 3a81e42..1ca5436 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
@@ -26,10 +26,12 @@ import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestTypedBytesWritable extends TestCase {
+public class TestTypedBytesWritable {
 
+  @Test
   public void testToString() {
     TypedBytesWritable tbw = new TypedBytesWritable();
     tbw.setValue(true);
@@ -46,6 +48,7 @@ public class TestTypedBytesWritable extends TestCase {
     assertEquals("random text", tbw.toString());
   }
 
+  @Test
   public void testIO() throws IOException {
     TypedBytesWritable tbw = new TypedBytesWritable();
     tbw.setValue(12345);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
index 63c2cf3..07478ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
@@ -22,20 +22,23 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.junit.Test;
 
-public class TestAmFilterInitializer extends TestCase {
+/**
+ * Test class for {@link AmFilterInitializer}.
+ */
+public class TestAmFilterInitializer {
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     NetUtils.addStaticResolution("host1", "172.0.0.1");
     NetUtils.addStaticResolution("host2", "172.0.0.1");
     NetUtils.addStaticResolution("host3", "172.0.0.1");


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/50] [abbrv] hadoop git commit: HDFS-11150: [SPS]: Provide persistence when satisfying storage policy. Contributed by Yuanbo Liu

Posted by ra...@apache.org.
HDFS-11150: [SPS]: Provide persistence when satisfying storage policy. Contributed by Yuanbo Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/498e4de0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/498e4de0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/498e4de0

Branch: refs/heads/HDFS-10285
Commit: 498e4de0b5a263a62040146419656b270b3d3f88
Parents: 6289650
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Jan 11 13:48:58 2017 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:01 2017 +0530

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   2 +-
 .../hdfs/server/common/HdfsServerConstants.java |   3 +
 .../hdfs/server/namenode/FSDirAttrOp.java       |  81 +++--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   8 +
 .../hdfs/server/namenode/FSDirectory.java       |  14 +
 .../hdfs/server/namenode/FSNamesystem.java      |   6 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 +-
 .../server/namenode/StoragePolicySatisfier.java |  22 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  37 +++
 .../TestPersistentStoragePolicySatisfier.java   | 311 +++++++++++++++++++
 .../namenode/TestStoragePolicySatisfier.java    | 112 +++----
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 12 files changed, 532 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


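In short, the commit makes a satisfyStoragePolicy request durable by recording
a satisfy xattr in the edit log, so the request survives checkpoints, restarts
and HA failover. A minimal sketch of the client-side calls involved, mirroring
the new tests further down (the path and policy name are illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SatisfyPolicySketch {
      static void requestSatisfy(DistributedFileSystem dfs) throws Exception {
        Path file = new Path("/testFile");     // illustrative path
        dfs.setStoragePolicy(file, "COLD");    // pick the target policy
        dfs.satisfyStoragePolicy(file);        // queue block movement; the
                                               // request is now persisted via
                                               // an xattr and the edit log
      }
    }
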
http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index af70eea..66989f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1682,7 +1682,7 @@ public interface ClientProtocol {
    * @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException append not
    *           allowed in safemode.
    */
-  @Idempotent
+  @AtMostOnce
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index e486317..42a2fc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -365,6 +365,9 @@ public interface HdfsServerConstants {
   String XATTR_ERASURECODING_POLICY =
       "system.hdfs.erasurecoding.policy";
 
+  String XATTR_SATISFY_STORAGE_POLICY =
+      "system.hdfs.satisfy.storage.policy";
+
   Path MOVER_ID_PATH = new Path("/system/mover.id");
 
   long BLOCK_GROUP_INDEX_MASK = 15;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index d7f40e0..2a27df1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -42,12 +43,14 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -197,10 +200,11 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static void satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String src) throws IOException {
+  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
+      String src, boolean logRetryCache) throws IOException {
 
     FSPermissionChecker pc = fsd.getPermissionChecker();
+    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -210,10 +214,13 @@ public class FSDirAttrOp {
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
-      unprotectedSatisfyStoragePolicy(bm, iip);
+      XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
+      xAttrs.add(satisfyXAttr);
     } finally {
       fsd.writeUnlock();
     }
+    fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
@@ -477,33 +484,61 @@ public class FSDirAttrOp {
     }
   }
 
-  static void unprotectedSatisfyStoragePolicy(BlockManager bm,
-      INodesInPath iip) throws IOException {
+  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
+      BlockManager bm, FSDirectory fsd) throws IOException {
 
-    // check whether file exists.
-    INode inode = iip.getLastINode();
-    if (inode == null) {
-      throw new FileNotFoundException("File/Directory does not exist: "
-          + iip.getPath());
-    }
+    final INode inode = FSDirectory.resolveLastINode(iip);
+    final int snapshotId = iip.getLatestSnapshotId();
+    final List<INode> candidateNodes = new ArrayList<>();
 
-    // TODO: need to check whether inode's storage policy
-    // has been satisfied or inode exists in the satisfier
-    // list before calling satisfyStoragePolicy in BlockManager.
-    if (inode.isDirectory()) {
-      final int snapshotId = iip.getLatestSnapshotId();
+    // TODO: think about optimization here, label the dir instead
+    // of the sub-files of the dir.
+    if (inode.isFile()) {
+      candidateNodes.add(inode);
+    } else if (inode.isDirectory()) {
       for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
         if (node.isFile()) {
-          bm.satisfyStoragePolicy(node.getId());
-
+          candidateNodes.add(node);
         }
       }
-    } else if (inode.isFile()) {
-      bm.satisfyStoragePolicy(inode.getId());
-    } else {
-      throw new FileNotFoundException("File/Directory does not exist: "
-          + iip.getPath());
     }
+
+    // If node has satisfy xattr, then stop adding it
+    // to satisfy movement queue.
+    if (inodeHasSatisfyXAttr(candidateNodes)) {
+      throw new IOException(
+          "Cannot request to call satisfy storage policy on path "
+          + iip.getPath()
+          + ", as this file/dir was already called for satisfying "
+          + "storage policy.");
+    }
+
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    final XAttr satisfyXAttr =
+        XAttrHelper.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
+    xattrs.add(satisfyXAttr);
+
+    for (INode node : candidateNodes) {
+      bm.satisfyStoragePolicy(node.getId());
+      List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(node);
+      List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(
+          fsd, existingXAttrs, xattrs, EnumSet.of(XAttrSetFlag.CREATE));
+      XAttrStorage.updateINodeXAttrs(node, newXAttrs, snapshotId);
+    }
+    return satisfyXAttr;
+  }
+
+  private static boolean inodeHasSatisfyXAttr(List<INode> candidateNodes) {
+    // If the node is a directory and one of the child files
+    // has satisfy xattr, then return true for this directory.
+    for (INode inode : candidateNodes) {
+      final XAttrFeature f = inode.getXAttrFeature();
+      if (inode.isFile() &&
+          f != null && f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null) {
+        return true;
+      }
+    }
+    return false;
   }
 
   private static void setDirStoragePolicy(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index acdade7..f21aa17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -42,6 +42,7 @@ import java.util.ListIterator;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 class FSDirXAttrOp {
   private static final XAttr KEYID_XATTR =
@@ -284,6 +285,13 @@ class FSDirXAttrOp {
         }
       }
 
+      // Add inode id to movement queue if xattrs contain satisfy xattr.
+      if (XATTR_SATISFY_STORAGE_POLICY.equals(xaName)) {
+        FSDirAttrOp.unprotectedSatisfyStoragePolicy(iip,
+            fsd.getBlockManager(), fsd);
+        continue;
+      }
+
       if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) {
         throw new IOException("Can only set '" +
             SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e6aa533..b8d90f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -91,6 +91,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 
 /**
@@ -1344,10 +1345,23 @@ public class FSDirectory implements Closeable {
       if (!inode.isSymlink()) {
         final XAttrFeature xaf = inode.getXAttrFeature();
         addEncryptionZone((INodeWithAdditionalFields) inode, xaf);
+        addStoragePolicySatisfier((INodeWithAdditionalFields) inode, xaf);
       }
     }
   }
 
+  private void addStoragePolicySatisfier(INodeWithAdditionalFields inode,
+      XAttrFeature xaf) {
+    if (xaf == null || inode.isDirectory()) {
+      return;
+    }
+    XAttr xattr = xaf.getXAttr(XATTR_SATISFY_STORAGE_POLICY);
+    if (xattr == null) {
+      return;
+    }
+    getBlockManager().satisfyStoragePolicy(inode.getId());
+  }
+
   private void addEncryptionZone(INodeWithAdditionalFields inode,
       XAttrFeature xaf) {
     if (xaf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0122f3a..a5f5eaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2144,7 +2144,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *
    * @param src file/directory path
    */
-  void satisfyStoragePolicy(String src) throws IOException {
+  void satisfyStoragePolicy(String src, boolean logRetryCache)
+      throws IOException {
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2166,8 +2167,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                 + " by admin. Seek for an admin help to activate it "
                 + "or use Mover tool.");
       }
-      // TODO: need to update editlog for persistence.
-      FSDirAttrOp.satisfyStoragePolicy(dir, blockManager, src);
+      FSDirAttrOp.satisfyStoragePolicy(dir, blockManager, src, logRetryCache);
     } finally {
       writeUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 8d95626..c861cef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1369,7 +1369,18 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // ClientProtocol
   public void satisfyStoragePolicy(String src) throws IOException {
     checkNNStartup();
-    namesystem.satisfyStoragePolicy(src);
+    namesystem.checkOperation(OperationCategory.WRITE);
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
+    try {
+      namesystem.satisfyStoragePolicy(src, cacheEntry != null);
+      success = true;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
   }
 
   @Override // ClientProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index b1b1464..3b19833 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -168,16 +168,18 @@ public class StoragePolicySatisfier implements Runnable {
     }
     while (namesystem.isRunning() && isRunning) {
       try {
-        Long blockCollectionID = storageMovementNeeded.get();
-        if (blockCollectionID != null) {
-          BlockCollection blockCollection =
-              namesystem.getBlockCollection(blockCollectionID);
-          // Check blockCollectionId existence.
-          if (blockCollection != null) {
-            boolean allBlockLocsAttemptedToSatisfy =
-                computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
-            this.storageMovementsMonitor.add(blockCollectionID,
-                allBlockLocsAttemptedToSatisfy);
+        if (!namesystem.isInSafeMode()) {
+          Long blockCollectionID = storageMovementNeeded.get();
+          if (blockCollectionID != null) {
+            BlockCollection blockCollection =
+                namesystem.getBlockCollection(blockCollectionID);
+            // Check blockCollectionId existence.
+            if (blockCollection != null) {
+              boolean allBlockLocsAttemptedToSatisfy =
+                  computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
+              this.storageMovementsMonitor
+                  .add(blockCollectionID, allBlockLocsAttemptedToSatisfy);
+            }
           }
         }
         // TODO: We can think to make this as configurable later, how frequently

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f3572ff..7e48cde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -2285,4 +2285,41 @@ public class DFSTestUtil {
     }
     return closedFiles;
   }
+
+  /**
+   * Check whether the block movement has successfully completed
+   * to satisfy the storage policy for the given file.
+   * @param fileName file name.
+   * @param expectedStorageType expected storage type.
+   * @param expectedStorageCount expected number of replicas of that storage type.
+   * @param timeout timeout in milliseconds.
+   * @param fs the DistributedFileSystem under test.
+   * @throws Exception
+   */
+  public static void waitExpectedStorageType(String fileName,
+      final StorageType expectedStorageType, int expectedStorageCount,
+      int timeout, DistributedFileSystem fs) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        final LocatedBlock lb;
+        try {
+          lb = fs.getClient().getLocatedBlocks(fileName, 0).get(0);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+        int actualStorageCount = 0;
+        for(StorageType type : lb.getStorageTypes()) {
+          if (expectedStorageType == type) {
+            actualStorageCount++;
+          }
+        }
+        LOG.info(
+            expectedStorageType + " replica count, expected="
+                + expectedStorageCount + " and actual=" + actualStorageCount);
+        return expectedStorageCount == actualStorageCount;
+      }
+    }, 1000, timeout);
+  }
 }

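A short usage sketch of the new helper, following the calls made in the test
below (the file name and replica counts match its WARM-policy expectations;
the wrapper class is illustrative):

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class WaitForStorageTypeSketch {
      // After satisfying a WARM policy on /testFile, wait until one replica
      // sits on DISK and two replicas sit on ARCHIVE, or the timeout expires.
      static void awaitWarm(DistributedFileSystem fs) throws Exception {
        int timeout = 300000;  // milliseconds, same value the tests use
        DFSTestUtil.waitExpectedStorageType("/testFile", StorageType.DISK, 1,
            timeout, fs);
        DFSTestUtil.waitExpectedStorageType("/testFile", StorageType.ARCHIVE, 2,
            timeout, fs);
      }
    }
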
http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
new file mode 100644
index 0000000..e4b4290
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Test persistence of satisfying files/directories.
+ */
+public class TestPersistentStoragePolicySatisfier {
+
+  private static Configuration conf;
+
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem fs;
+
+  private static Path testFile =
+      new Path("/testFile");
+  private static String testFileName = testFile.toString();
+
+  private static Path parentDir = new Path("/parentDir");
+  private static Path parentFile = new Path(parentDir, "parentFile");
+  private static String parentFileName = parentFile.toString();
+  private static Path childDir = new Path(parentDir, "childDir");
+  private static Path childFile = new Path(childDir, "childFile");
+  private static String childFileName = childFile.toString();
+
+  private static final String COLD = "COLD";
+  private static final String WARM = "WARM";
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String ALL_SSD = "ALL_SSD";
+
+  private static StorageType[][] storageTypes = new StorageType[][] {
+      {StorageType.ARCHIVE, StorageType.DISK},
+      {StorageType.DISK, StorageType.SSD},
+      {StorageType.SSD, StorageType.RAM_DISK},
+      {StorageType.ARCHIVE, StorageType.DISK},
+      {StorageType.ARCHIVE, StorageType.SSD}
+  };
+
+  private final int timeout = 300000;
+
+  /**
+   * Set up the environment for every test case.
+   * @throws Exception
+   */
+  public void clusterSetUp() throws Exception {
+    clusterSetUp(false);
+  }
+
+  /**
+   * Set up the cluster environment.
+   * @param isHAEnabled if true, enable simple HA.
+   * @throws Exception
+   */
+  private void clusterSetUp(boolean isHAEnabled) throws Exception {
+    conf = new HdfsConfiguration();
+    final int dnNumber = storageTypes.length;
+    final short replication = 3;
+    MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
+        .storageTypes(storageTypes)
+        .numDataNodes(dnNumber);
+    if (isHAEnabled) {
+      clusterBuilder.nnTopology(MiniDFSNNTopology.simpleHATopology());
+    }
+    cluster = clusterBuilder.build();
+    cluster.waitActive();
+    if (isHAEnabled) {
+      cluster.transitionToActive(0);
+      fs = HATestUtil.configureFailoverFs(cluster, conf);
+    } else {
+      fs = cluster.getFileSystem();
+    }
+
+    createTestFiles(fs, replication);
+  }
+
+  /**
+   * Set up test files for testing.
+   * @param dfs the DistributedFileSystem to create the files on.
+   * @param replication replication factor for the test files.
+   * @throws Exception
+   */
+  private void createTestFiles(DistributedFileSystem dfs,
+      short replication) throws Exception {
+    DFSTestUtil.createFile(dfs, testFile, 1024L, replication, 0L);
+    DFSTestUtil.createFile(dfs, parentFile, 1024L, replication, 0L);
+    DFSTestUtil.createFile(dfs, childFile, 1024L, replication, 0L);
+
+    DFSTestUtil.waitReplication(dfs, testFile, replication);
+    DFSTestUtil.waitReplication(dfs, parentFile, replication);
+    DFSTestUtil.waitReplication(dfs, childFile, replication);
+  }
+
+  /**
+   * Tear down environment for every test case.
+   * @throws IOException
+   */
+  private void clusterShutdown() throws IOException{
+    if(fs != null) {
+      fs.close();
+      fs = null;
+    }
+    if(cluster != null) {
+      cluster.shutdown(true);
+      cluster = null;
+    }
+  }
+
+  /**
+   * While satisfying a file/directory, trigger a cluster checkpoint to
+   * make sure satisfier persistence works as expected. This test case runs
+   * as below:
+   * 1. use satisfyStoragePolicy and add xAttr to the file.
+   * 2. do the checkpoint by secondary NameNode.
+   * 3. restart the cluster immediately.
+   * 4. make sure all the storage policies are satisfied.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testWithCheckpoint() throws Exception {
+    try {
+      clusterSetUp();
+      fs.setStoragePolicy(testFile, WARM);
+      fs.satisfyStoragePolicy(testFile);
+
+      // Start the checkpoint.
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+      SecondaryNameNode secondary = new SecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      restartCluster();
+
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.DISK, 1, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.ARCHIVE, 2, timeout, fs);
+
+      fs.setStoragePolicy(parentDir, COLD);
+      fs.satisfyStoragePolicy(parentDir);
+
+      DFSTestUtil.waitExpectedStorageType(
+          parentFileName, StorageType.ARCHIVE, 3, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          childFileName, StorageType.DEFAULT, 3, timeout, fs);
+
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Tests that satisfier persistence works as expected
+   * in an HA environment. This test case runs as below:
+   * 1. Set up an HA cluster with the simple HA topology.
+   * 2. Switch the active NameNode from nn0 to nn1 and back to nn0.
+   * 3. Make sure all the storage policies are satisfied.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testWithHA() throws Exception {
+    try {
+      // Enable HA env for testing.
+      clusterSetUp(true);
+
+      fs.setStoragePolicy(testFile, ALL_SSD);
+      fs.satisfyStoragePolicy(testFile);
+
+      cluster.transitionToStandby(0);
+      cluster.transitionToActive(1);
+
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.SSD, 3, timeout, fs);
+
+      // test directory
+      fs.setStoragePolicy(parentDir, WARM);
+      fs.satisfyStoragePolicy(parentDir);
+      cluster.transitionToStandby(1);
+      cluster.transitionToActive(0);
+
+      DFSTestUtil.waitExpectedStorageType(
+          parentFileName, StorageType.DISK, 1, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          parentFileName, StorageType.ARCHIVE, 2, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          childFileName, StorageType.DEFAULT, 3, timeout, fs);
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+
+  /**
+   * Tests that satisfier persistence works well across multiple
+   * cluster restarts. This test case runs as below:
+   * 1. Satisfy the storage policy of file1.
+   * 2. Restart the cluster.
+   * 3. Check whether all the blocks are satisfied.
+   * 4. Satisfy the storage policy of file2.
+   * 5. Restart the cluster.
+   * 6. Check whether all the blocks are satisfied.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testWithRestarts() throws Exception {
+    try {
+      clusterSetUp();
+      fs.setStoragePolicy(testFile, ONE_SSD);
+      fs.satisfyStoragePolicy(testFile);
+      restartCluster();
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.SSD, 1, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.DISK, 2, timeout, fs);
+
+      // test directory
+      fs.setStoragePolicy(parentDir, COLD);
+      fs.satisfyStoragePolicy(parentDir);
+      restartCluster();
+      DFSTestUtil.waitExpectedStorageType(
+          parentFileName, StorageType.ARCHIVE, 3, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          childFileName, StorageType.DEFAULT, 3, timeout, fs);
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Tests that satisfier persistence works well in a
+   * federated HA environment. This test case runs as below:
+   * 1. Set up an HA test environment with a federated topology.
+   * 2. Satisfy the storage policy of file1.
+   * 3. Switch the active NameNode of the first nameservice from nn1 to nn0.
+   * 4. Switch the active NameNode of the second nameservice from nn3 to nn2.
+   * 5. Check whether the storage policy of file1 is satisfied.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testWithFederationHA() throws Exception {
+    try {
+      conf = new HdfsConfiguration();
+      final MiniDFSCluster haCluster = new MiniDFSCluster
+          .Builder(conf)
+          .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+          .storageTypes(storageTypes)
+          .numDataNodes(storageTypes.length).build();
+      haCluster.waitActive();
+      haCluster.transitionToActive(1);
+      haCluster.transitionToActive(3);
+
+      fs = HATestUtil.configureFailoverFs(haCluster, conf);
+      createTestFiles(fs, (short) 3);
+
+      fs.setStoragePolicy(testFile, WARM);
+      fs.satisfyStoragePolicy(testFile);
+
+      haCluster.transitionToStandby(1);
+      haCluster.transitionToActive(0);
+      haCluster.transitionToStandby(3);
+      haCluster.transitionToActive(2);
+
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.DISK, 1, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.ARCHIVE, 2, timeout, fs);
+
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Restart the whole cluster and trigger the DataNodes' heartbeats.
+   * @throws Exception
+   */
+  private void restartCluster() throws Exception {
+    cluster.restartDataNodes();
+    cluster.restartNameNodes();
+    cluster.waitActive();
+    cluster.triggerHeartbeats();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 9abb78d..1c53894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -108,7 +108,8 @@ public class TestStoragePolicySatisfier {
 
       hdfsCluster.triggerHeartbeats();
       // Wait till namenode notified about the block location details
-      waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.ARCHIVE, 3, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -137,7 +138,8 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.SSD, 3, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -164,8 +166,10 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 2, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -195,8 +199,10 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
 
       // Wait till the block is moved to SSD areas
-      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 2, 30000, dfs);
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
@@ -245,8 +251,10 @@ public class TestStoragePolicySatisfier {
 
       for (String fileName : files) {
         // Wait till the block is moved to SSD areas
-        waitExpectedStorageType(fileName, StorageType.SSD, 1, 30000);
-        waitExpectedStorageType(fileName, StorageType.DISK, 2, 30000);
+        DFSTestUtil.waitExpectedStorageType(
+            fileName, StorageType.SSD, 1, 30000, dfs);
+        DFSTestUtil.waitExpectedStorageType(
+            fileName, StorageType.DISK, 2, 30000, dfs);
       }
 
       waitForBlocksMovementResult(blockCollectionIds.size(), 30000);
@@ -279,7 +287,8 @@ public class TestStoragePolicySatisfier {
 
       hdfsCluster.triggerHeartbeats();
       // Wait till namenode notified about the block location details
-      waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.ARCHIVE, 3, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -317,11 +326,14 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
 
       // take effect for the file in the directory.
-      waitExpectedStorageType(subFile1, StorageType.SSD, 1, 30000);
-      waitExpectedStorageType(subFile1, StorageType.DISK, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          subFile1, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          subFile1, StorageType.DISK, 2, 30000, dfs);
 
       // take no effect for the sub-dir's file in the directory.
-      waitExpectedStorageType(subFile2, StorageType.DEFAULT, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          subFile2, StorageType.DEFAULT, 3, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -367,6 +379,20 @@ public class TestStoragePolicySatisfier {
       } catch (FileNotFoundException e) {
 
       }
+
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(file));
+        hdfsAdmin.satisfyStoragePolicy(new Path(file));
+        Assert.fail(String.format(
+            "Should fail to satisfy storage policy "
+            + "for %s, since it has already been "
+            + "added to the satisfy movement queue.", file));
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(
+            String.format("Cannot request to call satisfy storage policy "
+                + "on path %s, as this file/dir was already called for "
+                + "satisfying storage policy.", file), e);
+      }
     } finally {
       shutdownCluster();
     }
@@ -407,8 +433,10 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier identified that block to move to
       // ARCHIVE area.
-      waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.ARCHIVE, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 2, 30000, dfs);
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
@@ -451,7 +479,8 @@ public class TestStoragePolicySatisfier {
       // No block movement will be scheduled as there is no target node available
       // with the required storage type.
       waitForAttemptedItems(1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 3, 30000, dfs);
       // Since there is no target node the item will get timed out and then
       // re-attempted.
       waitForAttemptedItems(1, 30000);
@@ -523,8 +552,10 @@ public class TestStoragePolicySatisfier {
     // with the required storage type.
     waitForAttemptedItems(1, 30000);
     waitForBlocksMovementResult(1, 30000);
-    waitExpectedStorageType(file1, StorageType.ARCHIVE, 1, 30000);
-    waitExpectedStorageType(file1, StorageType.DISK, 2, 30000);
+    DFSTestUtil.waitExpectedStorageType(
+        file1, StorageType.ARCHIVE, 1, 30000, dfs);
+    DFSTestUtil.waitExpectedStorageType(
+        file1, StorageType.DISK, 2, 30000, dfs);
   }
 
   /**
@@ -571,8 +602,10 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier identified that block to move to
       // ARCHIVE area.
-      waitExpectedStorageType(file, StorageType.ARCHIVE, 2, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 3, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.ARCHIVE, 2, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 3, 30000, dfs);
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
@@ -606,8 +639,10 @@ public class TestStoragePolicySatisfier {
 
       namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
       hdfsCluster.triggerHeartbeats();
-      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 2, 30000, dfs);
 
     } finally {
       shutdownCluster();
@@ -644,8 +679,10 @@ public class TestStoragePolicySatisfier {
       namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
       hdfsCluster.triggerHeartbeats();
 
-      waitExpectedStorageType(file, StorageType.DISK, 1, 30000);
-      waitExpectedStorageType(file, StorageType.ARCHIVE, 2, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.DISK, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file, StorageType.ARCHIVE, 2, 30000, dfs);
     } finally {
       shutdownCluster();
     }
@@ -771,33 +808,4 @@ public class TestStoragePolicySatisfier {
     cluster.waitActive();
     return cluster;
   }
-
-  // Check whether the Block movement has been successfully completed to satisfy
-  // the storage policy for the given file.
-  private void waitExpectedStorageType(final String fileName,
-      final StorageType expectedStorageType, int expectedStorageCount,
-      int timeout) throws Exception {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        LocatedBlock lb = null;
-        try {
-          lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
-        } catch (IOException e) {
-          LOG.error("Exception while getting located blocks", e);
-          return false;
-        }
-        int actualStorageCount = 0;
-        for (StorageType storageType : lb.getStorageTypes()) {
-          if (expectedStorageType == storageType) {
-            actualStorageCount++;
-          }
-        }
-        LOG.info(
-            expectedStorageType + " replica count, expected={} and actual={}",
-            expectedStorageType, actualStorageCount);
-        return expectedStorageCount == actualStorageCount;
-      }
-    }, 100, timeout);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e4de0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index 5f8639f..eb4a6a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -66,7 +66,7 @@ public class TestStoragePolicySatisfierWithStripedFile {
   private int defaultStripeBlockSize;
 
   private ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    return StripedFileTestUtil.getDefaultECPolicy();
   }
 
   /**
@@ -99,6 +99,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -128,7 +130,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
       client.mkdirs(barDir, new FsPermission((short) 777), true);
       client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // set an EC policy on "/bar" directory
-      client.setErasureCodingPolicy(barDir, null);
+      client.setErasureCodingPolicy(barDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // write file to barDir
       final String fooFile = "/bar/foo";
@@ -206,6 +209,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -235,7 +240,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
       client.mkdirs(barDir, new FsPermission((short) 777), true);
       client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // set an EC policy on "/bar" directory
-      client.setErasureCodingPolicy(barDir, null);
+      client.setErasureCodingPolicy(barDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // write file to barDir
       final String fooFile = "/bar/foo";
@@ -314,6 +320,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -343,7 +351,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
       client.mkdirs(barDir, new FsPermission((short) 777), true);
       client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // set an EC policy on "/bar" directory
-      client.setErasureCodingPolicy(barDir, null);
+      client.setErasureCodingPolicy(barDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // write file to barDir
       final String fooFile = "/bar/foo";




[10/50] [abbrv] hadoop git commit: YARN-6999. Add log about how to solve Error: Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster. (Linlin Zhou via gtcarrera9)

Posted by ra...@apache.org.
YARN-6999. Add log about how to solve Error: Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster. (Linlin Zhou via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bee7046
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bee7046
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bee7046

Branch: refs/heads/HDFS-10285
Commit: 3bee7046c52bff681a5aecc956ba9be6c3543397
Parents: b89ffcf
Author: Li Lu <gt...@apache.org>
Authored: Fri Aug 25 22:43:25 2017 -0700
Committer: Li Lu <gt...@apache.org>
Committed: Fri Aug 25 22:43:25 2017 -0700

----------------------------------------------------------------------
 .../launcher/ContainerLaunch.java               | 27 +++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bee7046/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index a0055c5..3f50c3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -564,9 +564,11 @@ public class ContainerLaunch implements Callable<Integer> {
         errorFileIS = fileSystem.open(errorFile);
         errorFileIS.readFully(startPosition, tailBuffer);
 
+        String tailBufferMsg = new String(tailBuffer, StandardCharsets.UTF_8);
         diagnosticInfo.append("Last ").append(tailSizeInBytes)
             .append(" bytes of ").append(errorFile.getName()).append(" :\n")
-            .append(new String(tailBuffer, StandardCharsets.UTF_8));
+            .append(tailBufferMsg).append("\n")
+            .append(analysesErrorMsgOfContainerExitWithFailure(tailBufferMsg));
       }
     } catch (IOException e) {
       LOG.error("Failed to get tail of the container's error log file", e);
@@ -580,6 +582,29 @@ public class ContainerLaunch implements Callable<Integer> {
             diagnosticInfo.toString()));
   }
 
+  private String analysesErrorMsgOfContainerExitWithFailure(String errorMsg) {
+    StringBuilder analysis = new StringBuilder();
+    if (errorMsg.indexOf("Error: Could not find or load main class"
+        + " org.apache.hadoop.mapreduce") != -1) {
+      analysis.append("Please check whether your etc/hadoop/mapred-site.xml "
+          + "contains the below configuration:\n");
+      analysis.append("<property>\n")
+          .append("  <name>yarn.app.mapreduce.am.env</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n<property>\n")
+          .append("  <name>mapreduce.map.env</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n<property>\n")
+          .append("  <name>mapreduce.reduce.env</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n");
+    }
+    return analysis.toString();
+  }
+
   protected String getPidFileSubpath(String appIdStr, String containerIdStr) {
     return getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR
         + String.format(ContainerLaunch.PID_FILE_NAME_FMT, containerIdStr);
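
For reference, the mapred-site.xml configuration that this new diagnostic asks users to verify would look roughly like the sketch below. The HADOOP_MAPRED_HOME value shown is only a placeholder; substitute the full path of your Hadoop distribution directory:

  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/path/to/your/hadoop</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/path/to/your/hadoop</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/path/to/your/hadoop</value>
  </property>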




[07/50] [abbrv] hadoop git commit: HDFS-12303. Change default EC cell size to 1MB for better performance. Contributed by Wei Zhou.

Posted by ra...@apache.org.
HDFS-12303. Change default EC cell size to 1MB for better performance. Contributed by Wei Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f29a0fc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f29a0fc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f29a0fc2

Branch: refs/heads/HDFS-10285
Commit: f29a0fc288a625522ba910e61b63fd5f10418b3d
Parents: e864f81
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Aug 25 14:14:23 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Aug 25 14:14:23 2017 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |  2 +-
 .../protocol/SystemErasureCodingPolicies.java   |  4 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../src/main/resources/hdfs-default.xml         |  2 +-
 .../src/site/markdown/HDFSErasureCoding.md      |  8 +-
 .../apache/hadoop/cli/TestErasureCodingCLI.java |  2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 14 ++-
 .../blockmanagement/TestBlockManager.java       | 89 ++++++++++----------
 .../test/resources/testErasureCodingConf.xml    | 86 +++++++++----------
 .../src/test/resources/test_ec_policies.xml     |  2 +-
 .../hadoop/tools/util/TestDistCpUtils.java      | 26 +++---
 11 files changed, 123 insertions(+), 114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 5667989..e99b099 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -389,7 +389,7 @@ public interface HdfsClientConfigKeys {
 
     String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default RS-6-3-64k erasure coding policy, each normal read could
+     * With default RS-6-3-1024k erasure coding policy, each normal read could
      * span 6 DNs, so this default value accommodates 3 read streams
      */
     int     THREADPOOL_SIZE_DEFAULT = 18;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
index f0efe76..c079fea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
@@ -40,8 +40,8 @@ public final class SystemErasureCodingPolicies {
   // Private constructor, this is a utility class.
   private SystemErasureCodingPolicies() {}
 
-  // 64 KB
-  private static final int DEFAULT_CELLSIZE = 64 * 1024;
+  // 1 MB
+  private static final int DEFAULT_CELLSIZE = 1024 * 1024;
 
   public static final byte RS_6_3_POLICY_ID = 1;
   private static final ErasureCodingPolicy SYS_POLICY1 =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7f60000..b836ff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -567,7 +567,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY =
       "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
-      "RS-6-3-64k";
+      "RS-6-3-1024k";
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int     DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index aedc7e8..2a42b5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3029,7 +3029,7 @@
 
 <property>
   <name>dfs.namenode.ec.system.default.policy</name>
-  <value>RS-6-3-64k</value>
+  <value>RS-6-3-1024k</value>
   <description>The default erasure coding policy name will be used
     on the path if no policy name is passed.
   </description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 786b512..a9c0f3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -65,7 +65,7 @@ Architecture
 
       2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
 
-    Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, six built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, `XOR-2-1-64k` and `REPLICATION`.
+    Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, six built-in policies are supported: `RS-3-2-1024k`, `RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k` and `REPLICATION`.
 
     `REPLICATION` is a special policy. It can only be set on directory, to force the directory to adopt 3x replication scheme, instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave 3x replication scheme directory with erasure coding directory.
 
@@ -115,14 +115,14 @@ Deployment
 
   By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them
   in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
-  for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might
-  be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as
+  for a cluster with 9 racks, a policy like `RS-10-4-1024k` will not preserve rack-level fault-tolerance, and `RS-6-3-1024k` or `RS-3-2-1024k` might
+  be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-1024k` would still be appropriate as long as
   there are at least 14 DataNodes in the cluster.
 
   A system default EC policy can be configured via 'dfs.namenode.ec.system.default.policy' configuration. With this configuration,
   the default EC policy will be used when no policy name is passed as an argument in the '-setPolicy' command.
 
-  By default, the 'dfs.namenode.ec.system.default.policy' is "RS-6-3-64k".
+  By default, the 'dfs.namenode.ec.system.default.policy' is "RS-6-3-1024k".
 
   The codec implementations for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys:
   `io.erasurecode.codec.rs.rawcoders` for the default RS codec,
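
Putting the renamed policies together, a minimal hdfs-site.xml sketch that enables two of the built-in policies and selects the new default could look like the following. The particular policy names chosen here are illustrative; pick the set that matches your cluster's rack count and fault-tolerance requirements:

  <property>
    <name>dfs.namenode.ec.policies.enabled</name>
    <value>RS-6-3-1024k,RS-3-2-1024k</value>
  </property>
  <property>
    <name>dfs.namenode.ec.system.default.policy</name>
    <value>RS-6-3-1024k</value>
  </property>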

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 7181968..60f4f56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -46,7 +46,7 @@ public class TestErasureCodingCLI extends CLITestHelper {
     super.setUp();
 
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        "RS-6-3-64k,RS-3-2-64k,XOR-2-1-64k");
+        "RS-6-3-1024k,RS-3-2-1024k,XOR-2-1-1024k");
 
     dfsCluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_OF_DATANODES).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 47cdf23..c0946f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -600,7 +600,17 @@ public class TestErasureCodingPolicies {
     fs.mkdirs(dirPath);
     fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
 
-    final String ecPolicyName = "RS-10-4-64k";
+    String ecPolicyName = null;
+    Collection<ErasureCodingPolicy> allPolicies =
+        fs.getAllErasureCodingPolicies();
+    for (ErasureCodingPolicy policy : allPolicies) {
+      if (!ecPolicy.equals(policy)) {
+        ecPolicyName = policy.getName();
+        break;
+      }
+    }
+    assertNotNull(ecPolicyName);
+
     fs.createFile(filePath).build().close();
     assertEquals(ecPolicy, fs.getErasureCodingPolicy(filePath));
     fs.delete(filePath, true);
@@ -704,7 +714,7 @@ public class TestErasureCodingPolicies {
 
     // Test add policy successfully
     newPolicy =
-        new ErasureCodingPolicy(toAddSchema, 1 * 1024 * 1024);
+        new ErasureCodingPolicy(toAddSchema, 4 * 1024 * 1024);
     policyArray  = new ErasureCodingPolicy[]{newPolicy};
     responses = fs.addErasureCodingPolicies(policyArray);
     assertEquals(1, responses.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 42aeadf..4092e5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -17,45 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.LinkedListMultimap;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -80,8 +45,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
@@ -115,10 +80,44 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.LinkedListMultimap;
-import com.google.common.collect.Lists;
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 public class TestBlockManager {
   private DatanodeStorageInfo[] storages;
@@ -1365,7 +1364,7 @@ public class TestBlockManager {
         .getNumDataUnits();
     final int numParityBlocks = StripedFileTestUtil.getDefaultECPolicy()
         .getNumParityUnits();
-    final long blockSize = 64 * 1024;
+    final long blockSize = 6 * 1024 * 1024;
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 1baf355..1613323 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -206,7 +206,7 @@
       <description>setPolicy : set erasure coding policy on a directory to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -214,7 +214,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-1024k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -223,8 +223,8 @@
       <description>setPolicy : set a policy twice</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -232,7 +232,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-1024k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -241,7 +241,7 @@
       <description>setPolicy : set replication policy on a directory</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <command>-fs NAMENODE -mkdir /ecdir/replica</command>
         <ec-admin-command>-fs NAMENODE -setPolicy -replicate -path /ecdir/replica</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/replica/file</command>
@@ -264,7 +264,7 @@
       <description>unsetPolicy : unset policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
@@ -283,8 +283,8 @@
       <description>setPolicy : change different policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-3-2-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-3-2-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -293,7 +293,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-3-2-64k</expected-output>
+          <expected-output>RS-3-2-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -303,7 +303,7 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <command>-fs NAMENODE -mkdir /ecdir/child</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir/child</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/child/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/child/ecfile</ec-admin-command>
@@ -316,7 +316,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -342,7 +342,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -351,7 +351,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -360,7 +360,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/ecfile</ec-admin-command>
       </test-commands>
@@ -371,7 +371,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -405,7 +405,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Add ErasureCodingPolicy RS-6-3-64k failed</expected-output>
+          <expected-output>Add ErasureCodingPolicy RS-6-3-1024k failed</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -413,14 +413,14 @@
     <test>
       <description>enablePolicy : enable the erasure coding policy</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is enabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -428,15 +428,15 @@
     <test>
       <description>enablePolicy : enable the erasure coding policy twice</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is enabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -444,15 +444,15 @@
     <test>
       <description>disablePolicy : disable the erasure coding policy</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is disabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -460,16 +460,16 @@
     <test>
       <description>disablePolicy : disable the erasure coding policy twice</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is disabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -513,7 +513,7 @@
       <description>setPolicy : illegal parameters - too many arguments case 1</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k /ecdir2</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-1024k /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -530,7 +530,7 @@
       <description>setPolicy : illegal parameters - too many arguments case 2</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k -replicate /ecdir2</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-1024k -replicate /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -561,10 +561,10 @@
     </test>
 
     <test>
-      <description>setPolicy : illegal parameters - RS-10-4-64k</description>
+      <description>setPolicy : illegal parameters - RS-10-4-1024k</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-10-4-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-10-4-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -572,7 +572,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Policy 'RS-10-4-64k' does not match any enabled erasure coding policies</expected-output>
+          <expected-output>Policy 'RS-10-4-1024k' does not match any enabled erasure coding policies</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -580,7 +580,7 @@
     <test>
       <description>setPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -610,7 +610,7 @@
     <test>
       <description>setPolicy : illegal parameters - replicate and policy coexist</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-64k -replicate</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-1024k -replicate</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -652,7 +652,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -751,7 +751,7 @@
     <test>
       <description>enablePolicy : illegal parameters - policy is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -766,7 +766,7 @@
     <test>
       <description>enablePolicy : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -781,7 +781,7 @@
     <test>
       <description>disablePolicy : illegal parameters - policy is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -796,7 +796,7 @@
     <test>
       <description>disablePolicy : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -842,7 +842,7 @@
       <description>count: file using absolute path with option -e to show erasurecoding policy of a directory</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir1</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /dir1 -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /dir1 -policy RS-6-3-1024k</ec-admin-command>
         <command>-fs NAMENODE -touchz /dir1/file1</command>
         <command>-fs NAMENODE -touchz /dir1/file2</command>
         <command>-fs NAMENODE -count -e -v /dir1</command>
@@ -883,7 +883,7 @@
       <description>ls: file using absolute path and option -e to show erasure coding policy of a directory</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir -p /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-6-3-1024k</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/file1</command>
         <command>-fs NAMENODE -touchz /ecdir/file2</command>
         <command>-fs NAMENODE -touchz /ecdir/file3</command>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
index b2416ac..6d5bb28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
@@ -59,7 +59,7 @@
   </policy>
   <policy>
     <schema>RSk6m3</schema>
-    <cellsize>65536</cellsize>
+    <cellsize>1048576</cellsize>
   </policy>
 </policies>
 </configuration>
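
The rename from the 64k to the 1024k policy suffix throughout these tests tracks the cell-size change above: the suffix in a policy name such as RS-6-3-1024k encodes the erasure coding cell size, so the two values are simply

    64k   cell size:   64 * 1024 =    65,536 bytes   (the old <cellsize>65536</cellsize>)
    1024k cell size: 1024 * 1024 = 1,048,576 bytes   (the new <cellsize>1048576</cellsize>)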

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f29a0fc2/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
index c42e546..f626de4 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.tools.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.EnumSet;
-import java.util.Random;
-import java.util.Stack;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -49,6 +39,16 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.EnumSet;
+import java.util.Random;
+import java.util.Stack;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 public class TestDistCpUtils {
   private static final Log LOG = LogFactory.getLog(TestDistCpUtils.class);
 
@@ -61,7 +61,7 @@ public class TestDistCpUtils {
   @BeforeClass
   public static void create() throws IOException {
     config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        "XOR-2-1-64k");
+        "XOR-2-1-1024k");
     cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(2)
         .format(true)
@@ -562,7 +562,7 @@ public class TestDistCpUtils {
     fs.mkdirs(srcECDir);
     fs.mkdirs(dstReplDir);
     String[] args = {"-setPolicy", "-path", "/tmp/srcECDir",
-        "-policy", "XOR-2-1-64k"};
+        "-policy", "XOR-2-1-1024k"};
     int res = ToolRunner.run(config, new ECAdmin(config), args);
     assertEquals("Setting EC policy should succeed!", 0, res);
     verifyReplFactorNotPreservedOnErasureCodedFile(srcECFile, true,
@@ -577,7 +577,7 @@ public class TestDistCpUtils {
     fs.mkdirs(srcReplDir);
     fs.mkdirs(dstECDir);
     args = new String[]{"-setPolicy", "-path", "/tmp/dstECDir",
-        "-policy", "XOR-2-1-64k"};
+        "-policy", "XOR-2-1-1024k"};
     res = ToolRunner.run(config, new ECAdmin(config), args);
     assertEquals("Setting EC policy should succeed!", 0, res);
     verifyReplFactorNotPreservedOnErasureCodedFile(srcReplFile,
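
The TestDistCpUtils changes above drive ECAdmin programmatically rather than through the shell. As a minimal standalone sketch of that pattern -- the class name, target path, and the assumption that fs.defaultFS points at a cluster with XOR-2-1-1024k enabled are illustrative, not taken from the commit -- it might look like:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.tools.ECAdmin;
  import org.apache.hadoop.util.ToolRunner;

  public class SetEcPolicyExample {
    public static void main(String[] args) throws Exception {
      // Picks up fs.defaultFS from the classpath configuration; assumes it
      // points at an HDFS cluster where XOR-2-1-1024k is an enabled policy.
      Configuration conf = new Configuration();
      String[] ecArgs = {"-setPolicy", "-path", "/tmp/srcECDir",
          "-policy", "XOR-2-1-1024k"};
      // Same invocation style as the test: ToolRunner drives ECAdmin directly.
      int res = ToolRunner.run(conf, new ECAdmin(conf), ecArgs);
      System.out.println("setPolicy exit code: " + res); // 0 on success
    }
  }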




[22/50] [abbrv] hadoop git commit: HDFS-11123. [SPS] Make storage policy satisfier daemon work on/off dynamically. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11123. [SPS] Make storage policy satisfier daemon work on/off dynamically. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98ced53e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98ced53e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98ced53e

Branch: refs/heads/HDFS-10285
Commit: 98ced53e98e481327a5d3de00e1c1046d1757a0b
Parents: cf8df29
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Wed Dec 14 17:49:44 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:27 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  45 +++++++++
 .../BlockStorageMovementAttemptedItems.java     |  24 +++--
 .../namenode/BlockStorageMovementNeeded.java    |   4 +
 .../hdfs/server/namenode/FSDirAttrOp.java       |   8 --
 .../hdfs/server/namenode/FSNamesystem.java      |  34 ++++++-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  26 ++++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../server/namenode/StoragePolicySatisfier.java |  45 +++++++--
 .../src/main/resources/hdfs-default.xml         |   7 +-
 .../namenode/TestNameNodeReconfigure.java       | 100 +++++++++++++++++++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |   2 +-
 11 files changed, 264 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ca978ab..bfebde4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4878,7 +4878,52 @@ public class BlockManager implements BlockStatsMXBean {
     }
   }
 
+  /**
+   * Gets the storage policy satisfier instance.
+   *
+   * @return sps
+   */
   public StoragePolicySatisfier getStoragePolicySatisfier() {
     return sps;
   }
+
+  /**
+   * Activate the storage policy satisfier by starting its service.
+   */
+  public void activateSPS() {
+    if (sps == null) {
+      LOG.info("Storage policy satisfier is not initialized.");
+      return;
+    } else if (sps.isRunning()) {
+      LOG.info("Storage policy satisfier is already running.");
+      return;
+    }
+    sps.start();
+  }
+
+  /**
+   * Deactivate the storage policy satisfier by stopping its services.
+   */
+  public void deactivateSPS() {
+    if (sps == null) {
+      LOG.info("Storage policy satisfier is not initialized.");
+      return;
+    } else if (!sps.isRunning()) {
+      LOG.info("Storage policy satisfier is already stopped.");
+      return;
+    }
+    sps.stop();
+    // TODO: add command to DNs for stop in-progress processing SPS commands?
+    // to avoid confusions in cluster, I think sending commands from centralized
+    // place would be better to drop pending queues at DN. Anyway in progress
+    // work will be finished in a while, but this command can void starting
+    // fresh movements at DN.
+  }
+
+  /**
+   * @return True if storage policy satisfier running.
+   */
+  public boolean isStoragePolicySatisfierRunning() {
+    return sps == null ? false : sps.isRunning();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 5457dc2..bb26082 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -49,7 +49,7 @@ public class BlockStorageMovementAttemptedItems {
   // processing and sent to DNs.
   private final Map<Long, Long> storageMovementAttemptedItems;
   private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
-  private volatile boolean spsRunning = true;
+  private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
   //
   // It might take anywhere between 30 to 60 minutes before
@@ -109,7 +109,8 @@ public class BlockStorageMovementAttemptedItems {
   /**
    * Starts the monitor thread.
    */
-  void start() {
+  public synchronized void start() {
+    monitorRunning = true;
     timerThread = new Daemon(new BlocksStorageMovementAttemptResultMonitor());
     timerThread.setName("BlocksStorageMovementAttemptResultMonitor");
     timerThread.start();
@@ -118,8 +119,14 @@ public class BlockStorageMovementAttemptedItems {
   /**
    * Stops the monitor thread.
    */
-  public void stop() {
-    spsRunning = false;
+  public synchronized void stop() {
+    monitorRunning = false;
+    timerThread.interrupt();
+    try {
+      timerThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+    this.clearQueues();
   }
 
   /**
@@ -129,13 +136,13 @@ public class BlockStorageMovementAttemptedItems {
   private class BlocksStorageMovementAttemptResultMonitor implements Runnable {
     @Override
     public void run() {
-      while (spsRunning) {
+      while (monitorRunning) {
         try {
           blockStorageMovementResultCheck();
           blocksStorageMovementUnReportedItemsCheck();
           Thread.sleep(checkTimeout);
         } catch (InterruptedException ie) {
-          LOG.debug("BlocksStorageMovementAttemptResultMonitor thread "
+          LOG.info("BlocksStorageMovementAttemptResultMonitor thread "
               + "is interrupted.", ie);
         }
       }
@@ -222,4 +229,9 @@ public class BlockStorageMovementAttemptedItems {
   public int getAttemptedItemsCount() {
     return storageMovementAttemptedItems.size();
   }
+
+  public void clearQueues() {
+    storageMovementAttemptedResults.clear();
+    storageMovementAttemptedItems.clear();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
index c916672..3241e6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
@@ -50,4 +50,8 @@ public class BlockStorageMovementNeeded {
   public synchronized Long get() {
     return storageMovementNeeded.poll();
   }
+
+  public synchronized void clearAll() {
+    storageMovementNeeded.clear();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index a80f751..d7f40e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -200,14 +200,6 @@ public class FSDirAttrOp {
   static void satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
 
-    // make sure storage policy is enabled, otherwise
-    // there is no need to satisfy storage policy.
-    if (!fsd.isStoragePolicyEnabled()) {
-      throw new IOException(String.format(
-          "Failed to satisfy storage policy since %s is set to false.",
-          DFS_STORAGE_POLICY_ENABLED_KEY));
-    }
-
     FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c851b30..0122f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -87,6 +87,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 
 import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
@@ -2149,6 +2150,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot satisfy storage policy for " + src);
+      // make sure storage policy is enabled, otherwise
+      // there is no need to satisfy storage policy.
+      if (!dir.isStoragePolicyEnabled()) {
+        throw new IOException(String.format(
+            "Failed to satisfy storage policy since %s is set to false.",
+            DFS_STORAGE_POLICY_ENABLED_KEY));
+      }
+
+      if (blockManager.getStoragePolicySatisfier() == null
+          || !blockManager.getStoragePolicySatisfier().isRunning()) {
+        throw new UnsupportedActionException(
+            "Cannot request to satisfy storage policy "
+                + "when storage policy satisfier feature has been deactivated"
+                + " by admin. Seek for an admin help to activate it "
+                + "or use Mover tool.");
+      }
       // TODO: need to update editlog for persistence.
       FSDirAttrOp.satisfyStoragePolicy(dir, blockManager, src);
     } finally {
@@ -3767,11 +3784,18 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         blockReportLeaseId =  blockManager.requestBlockReportLeaseId(nodeReg);
       }
 
-      // TODO: Handle blocks movement results send by the coordinator datanode.
-      // This has to be revisited as part of HDFS-11029.
-      if (blockManager.getStoragePolicySatisfier() != null) {
-        blockManager.getStoragePolicySatisfier()
-            .handleBlocksStorageMovementResults(blksMovementResults);
+      // Handle blocks movement results sent by the coordinator datanode.
+      StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
+      if (sps != null) {
+        if (!sps.isRunning()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(
+                "Storage policy satisfier is not running. So, ignoring block "
+                    + "storage movement results sent by co-ordinator datanode");
+          }
+        } else {
+          sps.handleBlocksStorageMovementResults(blksMovementResults);
+        }
       }
 
       //create ha status

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 79bbbc5..27c07e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -157,6 +157,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAUL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE;
@@ -288,7 +289,8 @@ public class NameNode extends ReconfigurableBase implements
           DFS_HEARTBEAT_INTERVAL_KEY,
           DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
           FS_PROTECTED_DIRECTORIES,
-          HADOOP_CALLER_CONTEXT_ENABLED_KEY));
+          HADOOP_CALLER_CONTEXT_ENABLED_KEY,
+          DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2034,6 +2036,8 @@ public class NameNode extends ReconfigurableBase implements
       return reconfCallerContextEnabled(newVal);
     } else if (property.equals(ipcClientRPCBackoffEnable)) {
       return reconfigureIPCBackoffEnabled(newVal);
+    } else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY)) {
+      return reconfigureSPSActivate(newVal, property);
     } else {
       throw new ReconfigurationException(property, newVal, getConf().get(
           property));
@@ -2117,6 +2121,26 @@ public class NameNode extends ReconfigurableBase implements
     return Boolean.toString(clientBackoffEnabled);
   }
 
+  String reconfigureSPSActivate(String newVal, String property)
+      throws ReconfigurationException {
+    if (newVal == null || !(newVal.equalsIgnoreCase(Boolean.TRUE.toString())
+        || newVal.equalsIgnoreCase(Boolean.FALSE.toString()))) {
+      throw new ReconfigurationException(property, newVal,
+          getConf().get(property),
+          new HadoopIllegalArgumentException(
+              "For activating or deactivating storage policy satisfier, "
+                  + "we must pass true/false only"));
+    }
+
+    boolean activateSPS = Boolean.parseBoolean(newVal);
+    if (activateSPS) {
+      namesystem.getBlockManager().activateSPS();
+    } else {
+      namesystem.getBlockManager().deactivateSPS();
+    }
+    return newVal;
+  }
+
   @Override  // ReconfigurableBase
   protected Configuration getNewConf() {
     return new HdfsConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 1f782e4..8d95626 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -2401,8 +2401,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     if (nn.isStandbyState()) {
       throw new StandbyException("Not supported by Standby Namenode.");
     }
-    StoragePolicySatisfier sps = namesystem.getBlockManager()
-        .getStoragePolicySatisfier();
-    return sps != null && sps.isRunning();
+    return namesystem.getBlockManager().isStoragePolicySatisfierRunning();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index cc2ca7d..56a531f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -91,7 +91,9 @@ public class StoragePolicySatisfier implements Runnable {
    * Start storage policy satisfier demon thread. Also start block storage
    * movements monitor for retry the attempts if needed.
    */
-  public void start() {
+  public synchronized void start() {
+    isRunning = true;
+    LOG.info("Starting StoragePolicySatisfier.");
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
@@ -101,8 +103,9 @@ public class StoragePolicySatisfier implements Runnable {
   /**
    * Stop storage policy satisfier demon thread.
    */
-  public void stop() {
+  public synchronized void stop() {
     isRunning = false;
+    LOG.info("Stopping StoragePolicySatisfier.");
     if (storagePolicySatisfierThread == null) {
       return;
     }
@@ -112,6 +115,7 @@ public class StoragePolicySatisfier implements Runnable {
     } catch (InterruptedException ie) {
     }
     this.storageMovementsMonitor.stop();
+    this.clearQueues();
   }
 
   /**
@@ -141,14 +145,20 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-    isRunning = !checkIfMoverRunning();
-    if (!isRunning) {
-      LOG.error("StoragePolicySatisfier thread stopped "
-          + "as Mover ID file " + HdfsServerConstants.MOVER_ID_PATH.toString()
-          + " exists");
-      return;
+    boolean isMoverRunning = !checkIfMoverRunning();
+    synchronized (this) {
+      isRunning = isMoverRunning;
+      if (!isRunning) {
+        // Stopping monitor thread and clearing queues as well
+        this.clearQueues();
+        this.storageMovementsMonitor.stop();
+        LOG.error(
+            "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+                + HdfsServerConstants.MOVER_ID_PATH.toString() + " exists");
+        return;
+      }
     }
-    while (namesystem.isRunning()) {
+    while (namesystem.isRunning() && isRunning) {
       try {
         Long blockCollectionID = storageMovementNeeded.get();
         if (blockCollectionID != null) {
@@ -159,7 +169,12 @@ public class StoragePolicySatisfier implements Runnable {
         // we want to check block movements.
         Thread.sleep(3000);
       } catch (Throwable t) {
-        isRunning = false;
+        synchronized (this) {
+          isRunning = false;
+          // Stopping monitor thread and clearing queues as well
+          this.clearQueues();
+          this.storageMovementsMonitor.stop();
+        }
         if (!namesystem.isRunning()) {
           LOG.info("Stopping StoragePolicySatisfier.");
           if (!(t instanceof InterruptedException)) {
@@ -488,4 +503,14 @@ public class StoragePolicySatisfier implements Runnable {
   BlockStorageMovementAttemptedItems getAttemptedItemsMonitor() {
     return storageMovementsMonitor;
   }
+
+  /**
+   * Clear the queues from to be storage movement needed lists and items tracked
+   * in storage movement monitor.
+   */
+  public void clearQueues() {
+    LOG.warn("Clearing all the queues from StoragePolicySatisfier. So, "
+        + "user requests on satisfying block storages would be discarded.");
+    storageMovementNeeded.clearAll();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 176b444..86df006 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4280,8 +4280,13 @@
   <name>dfs.storage.policy.satisfier.activate</name>
   <value>true</value>
   <description>
-    If true, activate StoragePolicySatisfier.
+    If true, StoragePolicySatisfier will be started along with active namenode.
     By default, StoragePolicySatisfier is activated.
+    Administrator can dynamically activate or deactivate StoragePolicySatisfier by using reconfiguration option.
+    Dynamic activation/deactivation option can be achieved in the following way.
+    1. Edit/update this configuration property values in hdfs-site.xml
+    2. Execute the reconfig command on hadoop command line prompt.
+       For example:$hdfs -reconfig namenode nn_host:port start
   </description>
 </property>
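
A note on the example in the new description text: the reconfig step is normally issued through dfsadmin, i.e. something like "hdfs dfsadmin -reconfig namenode <nn_host:ipc_port> start", optionally followed by "... status" to watch for completion. Programmatically, the same toggle is what TestNameNodeReconfigure exercises below; a condensed sketch of that path, assuming a running MiniDFSCluster as the tests set up (the helper name is illustrative):

  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;

  import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;

  /** Turn SPS off and back on through the NameNode reconfiguration API. */
  static void toggleSps(MiniDFSCluster cluster) throws Exception {
    NameNode nameNode = cluster.getNameNode();

    // Deactivate: the satisfier thread stops and its pending queues are cleared.
    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
    assert !nameNode.getNamesystem().getBlockManager()
        .isStoragePolicySatisfierRunning();

    // Reactivate: satisfyStoragePolicy() requests are accepted again.
    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
    assert nameNode.getNamesystem().getBlockManager()
        .isStoragePolicySatisfierRunning();
  }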
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index c0de63a..3e7f2e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -30,9 +30,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT;
@@ -40,6 +44,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
 
@@ -216,6 +222,100 @@ public class TestNameNodeReconfigure {
         datanodeManager.getHeartbeatRecheckInterval());
   }
 
+  /**
+   * Tests activate/deactivate Storage Policy Satisfier dynamically.
+   */
+  @Test(timeout = 30000)
+  public void testReconfigureStoragePolicySatisfierActivated()
+      throws ReconfigurationException {
+    final NameNode nameNode = cluster.getNameNode();
+
+    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        true);
+    // try invalid values
+    try {
+      nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+          "text");
+      fail("ReconfigurationException expected");
+    } catch (ReconfigurationException e) {
+      GenericTestUtils.assertExceptionContains(
+          "For activating or deactivating storage policy satisfier, "
+              + "we must pass true/false only",
+          e.getCause());
+    }
+
+    // enable SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        "true");
+
+    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        true);
+
+    // disable SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        "false");
+    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        false);
+
+    // revert to default
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        "true");
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+        true, nameNode.getNamesystem().getBlockManager()
+            .isStoragePolicySatisfierRunning());
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+        true, nameNode.getConf()
+            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
+  }
+
+  /**
+   * Test to satisfy storage policy after deactivating storage policy satisfier.
+   */
+  @Test(timeout = 30000)
+  public void testSatisfyStoragePolicyAfterSatisfierDeactivated()
+      throws ReconfigurationException, IOException {
+    final NameNode nameNode = cluster.getNameNode();
+
+    // deactivate SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        "false");
+    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        false);
+
+    Path filePath = new Path("/testSPS");
+    DistributedFileSystem fileSystem = cluster.getFileSystem();
+    fileSystem.create(filePath);
+    fileSystem.setStoragePolicy(filePath, "COLD");
+    try {
+      fileSystem.satisfyStoragePolicy(filePath);
+      fail("Expected to fail, as storage policy feature has deactivated.");
+    } catch (RemoteException e) {
+      GenericTestUtils
+          .assertExceptionContains("Cannot request to satisfy storage policy "
+              + "when storage policy satisfier feature has been deactivated"
+              + " by admin. Seek for an admin help to activate it "
+              + "or use Mover tool.", e);
+    }
+
+    // revert to default
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        "true");
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+        true, nameNode.getNamesystem().getBlockManager()
+            .isStoragePolicySatisfierRunning());
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+        true, nameNode.getConf()
+            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
+  }
+
+  void verifySPSActivated(final NameNode nameNode, String property,
+      boolean expected) {
+    assertEquals(property + " has wrong value", expected, nameNode
+        .getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
+    assertEquals(property + " has wrong value", expected, nameNode.getConf()
+        .getBoolean(property, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT));
+  }
+
   @Test
   public void testBlockInvalidateLimitAfterReconfigured()
       throws ReconfigurationException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ced53e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 2d38f2f..ca23d62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -372,7 +372,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(6, outs.size());
+    assertEquals(7, outs.size());
     assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
     assertEquals(errs.size(), 0);




[33/50] [abbrv] hadoop git commit: HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. Contributed by Yuanbo Liu.

Posted by ra...@apache.org.
HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bb9b40c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bb9b40c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bb9b40c

Branch: refs/heads/HDFS-10285
Commit: 4bb9b40c174f17d231d22dce1dc82af7c485fc4d
Parents: 34ba6df
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Tue Mar 14 00:52:24 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:25 2017 +0530

----------------------------------------------------------------------
 .../BlockStorageMovementAttemptedItems.java     |  14 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java       |   8 ++
 .../hdfs/server/namenode/FSDirectory.java       |  16 +++
 .../server/namenode/StoragePolicySatisfier.java |  45 ++++++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   6 +-
 .../TestPersistentStoragePolicySatisfier.java   | 112 ++++++++++++++++++-
 7 files changed, 186 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 042aca3..f15db73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -54,6 +55,7 @@ public class BlockStorageMovementAttemptedItems {
   private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
   private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
+  private final StoragePolicySatisfier sps;
   //
   // It might take anywhere between 30 to 60 minutes before
   // a request is timed out.
@@ -69,7 +71,8 @@ public class BlockStorageMovementAttemptedItems {
 
   public BlockStorageMovementAttemptedItems(long recheckTimeout,
       long selfRetryTimeout,
-      BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) {
+      BlockStorageMovementNeeded unsatisfiedStorageMovementFiles,
+      StoragePolicySatisfier sps) {
     if (recheckTimeout > 0) {
       this.minCheckTimeout = Math.min(minCheckTimeout, recheckTimeout);
     }
@@ -78,6 +81,7 @@ public class BlockStorageMovementAttemptedItems {
     this.blockStorageMovementNeeded = unsatisfiedStorageMovementFiles;
     storageMovementAttemptedItems = new HashMap<>();
     storageMovementAttemptedResults = new ArrayList<>();
+    this.sps = sps;
   }
 
   /**
@@ -200,6 +204,9 @@ public class BlockStorageMovementAttemptedItems {
         } catch (InterruptedException ie) {
           LOG.info("BlocksStorageMovementAttemptResultMonitor thread "
               + "is interrupted.", ie);
+        } catch (IOException ie) {
+          LOG.warn("BlocksStorageMovementAttemptResultMonitor thread "
+              + "received exception and exiting.", ie);
         }
       }
     }
@@ -248,7 +255,7 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   @VisibleForTesting
-  void blockStorageMovementResultCheck() {
+  void blockStorageMovementResultCheck() throws IOException {
     synchronized (storageMovementAttemptedResults) {
       Iterator<BlocksStorageMovementResult> resultsIter =
           storageMovementAttemptedResults.iterator();
@@ -296,6 +303,9 @@ public class BlockStorageMovementAttemptedItems {
                   + " reported from co-ordinating datanode. But the trackID "
                   + "doesn't exists in storageMovementAttemptedItems list",
                   storageMovementAttemptedResult.getTrackId());
+              // Remove xattr for the track id.
+              this.sps.notifyBlkStorageMovementFinished(
+                  storageMovementAttemptedResult.getTrackId());
             }
           }
           // Remove trackID from the attempted list, if any.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 2a27df1..e604726 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -541,6 +541,14 @@ public class FSDirAttrOp {
     return false;
   }
 
+  static void unprotectedRemoveSPSXAttr(INode inode, XAttr spsXAttr)
+      throws IOException{
+    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+    existingXAttrs.remove(spsXAttr);
+    XAttrStorage.updateINodeXAttrs(inode, existingXAttrs,
+        INodesInPath.fromINode(inode).getLatestSnapshotId());
+  }
+
   private static void setDirStoragePolicy(
       FSDirectory fsd, INodesInPath iip, byte policyId) throws IOException {
     INode inode = FSDirectory.resolveLastINode(iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b8d90f9..2a69b11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1362,6 +1362,22 @@ public class FSDirectory implements Closeable {
     getBlockManager().satisfyStoragePolicy(inode.getId());
   }
 
+  /**
+   * Remove the SPS xattr from the inode, retrieve the inode from the
+   * block collection id.
+   * @param id
+   *           - file block collection id.
+   */
+  public void removeSPSXattr(long id) throws IOException {
+    final INode inode = getInode(id);
+    final XAttrFeature xaf = inode.getXAttrFeature();
+    final XAttr spsXAttr = xaf.getXAttr(XATTR_SATISFY_STORAGE_POLICY);
+
+    if (spsXAttr != null) {
+      FSDirAttrOp.unprotectedRemoveSPSXAttr(inode, spsXAttr);
+    }
+  }
+
   private void addEncryptionZone(INodeWithAdditionalFields inode,
       XAttrFeature xaf) {
     if (xaf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 29c8a5d..337d5b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -91,7 +92,8 @@ public class StoragePolicySatisfier implements Runnable {
         conf.getLong(
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT),
-        storageMovementNeeded);
+        storageMovementNeeded,
+        this);
   }
 
   /**
@@ -119,12 +121,6 @@ public class StoragePolicySatisfier implements Runnable {
    */
   public synchronized void stop(boolean reconfigStop) {
     isRunning = false;
-    if (reconfigStop) {
-      LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
-          + "deactivate it.");
-    } else {
-      LOG.info("Stopping StoragePolicySatisfier.");
-    }
     if (storagePolicySatisfierThread == null) {
       return;
     }
@@ -135,8 +131,12 @@ public class StoragePolicySatisfier implements Runnable {
     }
     this.storageMovementsMonitor.stop();
     if (reconfigStop) {
-      this.clearQueues();
+      LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
+          + "deactivate it.");
+      this.clearQueuesWithNotification();
       this.blockManager.getDatanodeManager().addDropSPSWorkCommandsToAllDNs();
+    } else {
+      LOG.info("Stopping StoragePolicySatisfier.");
     }
   }
 
@@ -717,4 +717,33 @@ public class StoragePolicySatisfier implements Runnable {
         + "user requests on satisfying block storages would be discarded.");
     storageMovementNeeded.clearAll();
   }
+
+  /**
+   * Clean all the movements in storageMovementNeeded and notify
+   * to clean up required resources.
+   * @throws IOException
+   */
+  private void clearQueuesWithNotification() {
+    Long id;
+    while ((id = storageMovementNeeded.get()) != null) {
+      try {
+        notifyBlkStorageMovementFinished(id);
+      } catch (IOException ie) {
+        LOG.warn("Failed to remove SPS "
+            + "xattr for collection id " + id, ie);
+      }
+    }
+  }
+
+  /**
+   * When block movement has been finished successfully, some additional
+   * operations should be notified, for example, SPS xattr should be
+   * removed.
+   * @param trackId track id i.e., block collection id.
+   * @throws IOException
+   */
+  public void notifyBlkStorageMovementFinished(long trackId)
+      throws IOException {
+    this.namesystem.getFSDirectory().removeSPSXattr(trackId);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7e48cde..aea4dac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -2320,6 +2320,6 @@ public class DFSTestUtil {
                 + expectedStorageCount + " and actual=" + actualStorageCount);
         return expectedStorageCount == actualStorageCount;
       }
-    }, 1000, timeout);
+    }, 500, timeout);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
index 6641134..95142d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Tests that block storage movement attempt failures are reported from DN and
@@ -36,10 +37,11 @@ public class TestBlockStorageMovementAttemptedItems {
   private final int selfRetryTimeout = 500;
 
   @Before
-  public void setup() {
+  public void setup() throws Exception {
     unsatisfiedStorageMovementFiles = new BlockStorageMovementNeeded();
+    StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
     bsmAttemptedItems = new BlockStorageMovementAttemptedItems(100,
-        selfRetryTimeout, unsatisfiedStorageMovementFiles);
+        selfRetryTimeout, unsatisfiedStorageMovementFiles, sps);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb9b40c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index e4b4290..8c3359a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -20,16 +20,22 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
 
 /**
  * Test persistence of satisfying files/directories.
@@ -72,7 +78,16 @@ public class TestPersistentStoragePolicySatisfier {
    * @throws IOException
    */
   public void clusterSetUp() throws Exception {
-    clusterSetUp(false);
+    clusterSetUp(false, new HdfsConfiguration());
+  }
+
+  /**
+   * Setup environment for every test case.
+   * @param hdfsConf hdfs conf.
+   * @throws Exception
+   */
+  public void clusterSetUp(Configuration hdfsConf) throws Exception {
+    clusterSetUp(false, hdfsConf);
   }
 
   /**
@@ -80,8 +95,9 @@ public class TestPersistentStoragePolicySatisfier {
    * @param isHAEnabled if true, enable simple HA.
    * @throws IOException
    */
-  private void clusterSetUp(boolean isHAEnabled) throws Exception {
-    conf = new HdfsConfiguration();
+  private void clusterSetUp(boolean isHAEnabled, Configuration newConf)
+      throws Exception {
+    conf = newConf;
     final int dnNumber = storageTypes.length;
     final short replication = 3;
     MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
@@ -188,7 +204,7 @@ public class TestPersistentStoragePolicySatisfier {
   public void testWithHA() throws Exception {
     try {
       // Enable HA env for testing.
-      clusterSetUp(true);
+      clusterSetUp(true, new HdfsConfiguration());
 
       fs.setStoragePolicy(testFile, ALL_SSD);
       fs.satisfyStoragePolicy(testFile);
@@ -298,6 +314,94 @@ public class TestPersistentStoragePolicySatisfier {
   }
 
   /**
+   * Tests to verify SPS xattr will be removed if the satisfy work has
+   * been finished, expect that the method satisfyStoragePolicy can be
+   * invoked on the same file again after the block movement has been
+   * finished:
+   * 1. satisfy storage policy of file1.
+   * 2. wait until storage policy is satisfied.
+   * 3. satisfy storage policy of file1 again
+   * 4. make sure step 3 works as expected.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testMultipleSatisfyStoragePolicy() throws Exception {
+    try {
+      // Lower block movement check for testing.
+      conf = new HdfsConfiguration();
+      final long minCheckTimeout = 500; // minimum value
+      conf.setLong(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          minCheckTimeout);
+      clusterSetUp(conf);
+      fs.setStoragePolicy(testFile, ONE_SSD);
+      fs.satisfyStoragePolicy(testFile);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.SSD, 1, timeout, fs);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.DISK, 2, timeout, fs);
+
+      // Make sure that SPS xattr has been removed.
+      int retryTime = 0;
+      while (retryTime < 30) {
+        if (!fileContainsSPSXAttr(testFile)) {
+          break;
+        }
+        Thread.sleep(minCheckTimeout);
+        retryTime += 1;
+      }
+
+      fs.setStoragePolicy(testFile, COLD);
+      fs.satisfyStoragePolicy(testFile);
+      DFSTestUtil.waitExpectedStorageType(
+          testFileName, StorageType.ARCHIVE, 3, timeout, fs);
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Tests to verify SPS xattr is removed after SPS is dropped,
+   * expect that if the SPS is disabled/dropped, the SPS
+   * xattr should be removed accordingly:
+   * 1. satisfy storage policy of file1.
+   * 2. drop SPS thread in block manager.
+   * 3. make sure sps xattr is removed.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testDropSPS() throws Exception {
+    try {
+      clusterSetUp();
+      fs.setStoragePolicy(testFile, ONE_SSD);
+      fs.satisfyStoragePolicy(testFile);
+
+      cluster.getNamesystem().getBlockManager().deactivateSPS();
+
+      // Make sure satisfy xattr has been removed.
+      assertFalse(fileContainsSPSXAttr(testFile));
+
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Check whether file contains SPS xattr.
+   * @param fileName file name.
+   * @return true if file contains SPS xattr.
+   * @throws IOException
+   */
+  private boolean fileContainsSPSXAttr(Path fileName) throws IOException {
+    final INode inode = cluster.getNamesystem()
+        .getFSDirectory().getINode(fileName.toString());
+    final XAttr satisfyXAttr =
+        XAttrHelper.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
+    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+    return existingXAttrs.contains(satisfyXAttr);
+  }
+
+  /**
    * Restart the hole env and trigger the DataNode's heart beats.
    * @throws Exception
    */
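
The new tests capture the practical effect of this change: once a movement completes (or SPS is dropped), the tracking xattr is removed, so satisfyStoragePolicy can be issued again on the same path. A condensed client-side sketch mirroring testMultipleSatisfyStoragePolicy above -- the helper name and the 30s timeout are illustrative, and waitExpectedStorageType is the test utility from DFSTestUtil:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.DFSTestUtil;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  static void satisfyTwice(DistributedFileSystem fs, Path file) throws Exception {
    // First request: ONE_SSD, then wait until one replica lands on SSD.
    fs.setStoragePolicy(file, "ONE_SSD");
    fs.satisfyStoragePolicy(file);
    DFSTestUtil.waitExpectedStorageType(
        file.toString(), StorageType.SSD, 1, 30000, fs);

    // Once the SPS xattr has been removed for the finished move,
    // satisfyStoragePolicy can be invoked on the same file again.
    fs.setStoragePolicy(file, "COLD");
    fs.satisfyStoragePolicy(file);
    DFSTestUtil.waitExpectedStorageType(
        file.toString(), StorageType.ARCHIVE, 3, 30000, fs);
  }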




[41/50] [abbrv] hadoop git commit: HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cc681d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cc681d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cc681d0

Branch: refs/heads/HDFS-10285
Commit: 8cc681d09f602baa7ccb42610cc7c231c122a191
Parents: caea9fe
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Sun Jun 18 11:00:28 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:04 2017 +0530

----------------------------------------------------------------------
 .../BlockStorageMovementAttemptedItems.java     | 39 ++++++++++----------
 1 file changed, 20 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cc681d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index bf7859c..6048986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -296,19 +296,17 @@ public class BlockStorageMovementAttemptedItems {
             .next();
         synchronized (storageMovementAttemptedItems) {
           Status status = storageMovementAttemptedResult.getStatus();
+          long trackId = storageMovementAttemptedResult.getTrackId();
           ItemInfo itemInfo;
           switch (status) {
           case FAILURE:
-            blockStorageMovementNeeded
-                .add(storageMovementAttemptedResult.getTrackId());
+            blockStorageMovementNeeded.add(trackId);
             LOG.warn("Blocks storage movement results for the tracking id: {}"
                 + " is reported from co-ordinating datanode, but result"
-                + " status is FAILURE. So, added for retry",
-                storageMovementAttemptedResult.getTrackId());
+                + " status is FAILURE. So, added for retry", trackId);
             break;
           case SUCCESS:
-            itemInfo = storageMovementAttemptedItems
-                .get(storageMovementAttemptedResult.getTrackId());
+            itemInfo = storageMovementAttemptedItems.get(trackId);
 
             // ItemInfo could be null. One case is, before the blocks movements
             // result arrives the attempted trackID became timed out and then
@@ -318,20 +316,23 @@ public class BlockStorageMovementAttemptedItems {
             // following condition. If all the block locations under the trackID
             // are attempted and failed to find matching target nodes to satisfy
             // storage policy in previous SPS iteration.
-            if (itemInfo != null
-                && !itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
-              blockStorageMovementNeeded
-                  .add(storageMovementAttemptedResult.getTrackId());
-              LOG.warn("Blocks storage movement is SUCCESS for the track id: {}"
-                  + " reported from co-ordinating datanode. But adding trackID"
-                  + " back to retry queue as some of the blocks couldn't find"
-                  + " matching target nodes in previous SPS iteration.",
-                  storageMovementAttemptedResult.getTrackId());
+            String msg = "Blocks storage movement is SUCCESS for the track id: "
+                + trackId + " reported from co-ordinating datanode.";
+            if (itemInfo != null) {
+              if (!itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+                blockStorageMovementNeeded.add(trackId);
+                LOG.warn("{} But adding trackID back to retry queue as some of"
+                    + " the blocks couldn't find matching target nodes in"
+                    + " previous SPS iteration.", msg);
+              } else {
+                LOG.info(msg);
+                // Remove xattr for the track id.
+                this.sps.postBlkStorageMovementCleanup(
+                    storageMovementAttemptedResult.getTrackId());
+              }
             } else {
-              LOG.info("Blocks storage movement is SUCCESS for the track id: {}"
-                  + " reported from co-ordinating datanode. But the trackID "
-                  + "doesn't exists in storageMovementAttemptedItems list",
-                  storageMovementAttemptedResult.getTrackId());
+              LOG.info("{} But the trackID doesn't exists in "
+                  + "storageMovementAttemptedItems list", msg);
               // Remove xattr for the track id.
               this.sps.postBlkStorageMovementCleanup(
                   storageMovementAttemptedResult.getTrackId());



[21/50] [abbrv] hadoop git commit: HDFS-10885. [SPS]: Mover tool should not be allowed to run when Storage Policy Satisfier is on. Contributed by Wei Zhou

Posted by ra...@apache.org.
HDFS-10885. [SPS]: Mover tool should not be allowed to run when Storage Policy Satisfier is on. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf8df299
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf8df299
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf8df299

Branch: refs/heads/HDFS-10285
Commit: cf8df2996bb022c397aaab54cf102ae0728633c9
Parents: 68ede8e
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Tue Dec 6 17:56:08 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:51 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  8 +++
 .../ClientNamenodeProtocolTranslatorPB.java     | 18 ++++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  5 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 18 ++++++
 .../hadoop/hdfs/server/balancer/ExitStatus.java |  3 +-
 .../server/blockmanagement/BlockManager.java    | 27 ++++++++-
 .../hdfs/server/common/HdfsServerConstants.java |  3 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 23 ++++++-
 .../hdfs/server/namenode/FSNamesystem.java      |  6 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java | 11 ++++
 .../server/namenode/StoragePolicySatisfier.java | 37 +++++++++++
 .../src/main/resources/hdfs-default.xml         |  9 +++
 .../TestStoragePolicySatisfyWorker.java         |  2 +
 .../hadoop/hdfs/server/mover/TestMover.java     | 64 ++++++++++++++++++--
 .../hdfs/server/mover/TestStorageMover.java     |  2 +
 .../namenode/TestStoragePolicySatisfier.java    | 23 +++++++
 18 files changed, 258 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
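
The core of this change is a single new client-side query that the Mover uses
as a guard. A minimal sketch of performing the same check from code, assuming
an already-opened DistributedFileSystem handle named dfs (the variable name is
illustrative; the API itself is the one added in this commit):

    // Ask the active NameNode whether the StoragePolicySatisfier thread is
    // running; a StandbyException means the queried NameNode is in standby.
    DFSClient client = dfs.getClient();
    boolean spsRunning = client.isStoragePolicySatisfierRunning();
    if (spsRunning) {
      System.err.println("StoragePolicySatisfier is running; "
          + "skipping the Mover for this namespace.");
    }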


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e6f731d..8ea0407 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3098,6 +3098,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+    return namenode.isStoragePolicySatisfierRunning();
+  }
+
   Tracer getTracer() {
     return tracer;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 4232fef..af70eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1684,4 +1684,12 @@ public interface ClientProtocol {
    */
   @Idempotent
   void satisfyStoragePolicy(String path) throws IOException;
+
+  /**
+   * Check if StoragePolicySatisfier is running.
+   * @return true if StoragePolicySatisfier is running
+   * @throws IOException
+   */
+  @Idempotent
+  boolean isStoragePolicySatisfierRunning() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 7b5ca4f..cf22be2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -138,6 +138,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSto
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -280,6 +282,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
   private final static GetErasureCodingCodecsRequestProto
       VOID_GET_EC_CODEC_REQUEST = GetErasureCodingCodecsRequestProto
       .newBuilder().build();
+  private final static IsStoragePolicySatisfierRunningRequestProto
+      VOID_IS_SPS_RUNNING_REQUEST = IsStoragePolicySatisfierRunningRequestProto
+      .newBuilder().build();
+
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1834,6 +1840,18 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+    try {
+      IsStoragePolicySatisfierRunningResponseProto rep =
+          rpcProxy.isStoragePolicySatisfierRunning(null,
+              VOID_IS_SPS_RUNNING_REQUEST);
+      return rep.getRunning();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public QuotaUsage getQuotaUsage(String path) throws IOException {
     GetQuotaUsageRequestProto req =
         GetQuotaUsageRequestProto.newBuilder().setPath(path).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index c601283..4157540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -801,6 +801,13 @@ message SatisfyStoragePolicyResponseProto {
 
 }
 
+message IsStoragePolicySatisfierRunningRequestProto {  // no parameters
+}
+
+message IsStoragePolicySatisfierRunningResponseProto {
+  required bool running = 1;
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -983,4 +990,6 @@ service ClientNamenodeProtocol {
       returns(ListOpenFilesResponseProto);
   rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto)
       returns(SatisfyStoragePolicyResponseProto);
+  rpc isStoragePolicySatisfierRunning(IsStoragePolicySatisfierRunningRequestProto)
+      returns(IsStoragePolicySatisfierRunningResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b836ff8..1c8934f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -540,6 +540,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.mover.max-no-move-interval";
   public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
+      "dfs.storage.policy.satisfier.activate";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
+      true;
+
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int     DFS_DATANODE_DEFAULT_PORT = 9866;
   public static final String  DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 8d6d570..7892476 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -150,6 +150,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSto
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsStoragePolicySatisfierRunningResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -1794,6 +1796,22 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public IsStoragePolicySatisfierRunningResponseProto
+      isStoragePolicySatisfierRunning(RpcController controller,
+      IsStoragePolicySatisfierRunningRequestProto req)
+      throws ServiceException {
+    try {
+      boolean ret = server.isStoragePolicySatisfierRunning();
+      IsStoragePolicySatisfierRunningResponseProto.Builder builder =
+          IsStoragePolicySatisfierRunningResponseProto.newBuilder();
+      builder.setRunning(ret);
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetQuotaUsageResponseProto getQuotaUsage(
       RpcController controller, GetQuotaUsageRequestProto req)
       throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
index 6bf2986..5cf4204 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
@@ -30,7 +30,8 @@ public enum ExitStatus {
   IO_EXCEPTION(-4),
   ILLEGAL_ARGUMENTS(-5),
   INTERRUPTED(-6),
-  UNFINALIZED_UPGRADE(-7);
+  UNFINALIZED_UPGRADE(-7),
+  SKIPPED_DUE_TO_SPS(-8);
 
   private final int code;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 763ec19..ca978ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -457,7 +457,24 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
         * 1000L);
 
-    sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this);
+    final boolean storagePolicyEnabled =
+        conf.getBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
+            DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
+    final boolean spsEnabled =
+        conf.getBoolean(
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
+    if (storagePolicyEnabled && spsEnabled) {
+      sps = new StoragePolicySatisfier(namesystem,
+          storageMovementNeeded, this);
+    } else {
+      sps = null;
+      LOG.warn(
+          "Failed to start StoragePolicySatisfier"
+              + " since {} set to {} and {} set to {}.",
+          DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, spsEnabled);
+    }
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
     this.maxCorruptFilesReturned = conf.getInt(
@@ -677,11 +694,15 @@ public class BlockManager implements BlockStatsMXBean {
     this.blockReportThread.start();
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
     bmSafeMode.activate(blockTotal);
-    sps.start();
+    if (sps != null) {
+      sps.start();
+    }
   }
 
   public void close() {
-	sps.stop();
+    if (sps != null) {
+      sps.stop();
+    }
     bmSafeMode.close();
     try {
       redundancyThread.interrupt();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c3098f3..e486317 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -24,6 +24,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
@@ -364,6 +365,8 @@ public interface HdfsServerConstants {
   String XATTR_ERASURECODING_POLICY =
       "system.hdfs.erasurecoding.policy";
 
+  Path MOVER_ID_PATH = new Path("/system/mover.id");
+
   long BLOCK_GROUP_INDEX_MASK = 15;
   byte MAX_BLOCKS_IN_GROUP = 16;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index b653f4f..dd6f90e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -41,11 +41,14 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.Matcher;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.SecurityUtil;
@@ -70,8 +73,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 public class Mover {
   static final Log LOG = LogFactory.getLog(Mover.class);
 
-  static final Path MOVER_ID_PATH = new Path("/system/mover.id");
-
   private static class StorageMap {
     private final StorageGroupMap<Source> sources
         = new StorageGroupMap<Source>();
@@ -645,7 +646,7 @@ public class Mover {
     List<NameNodeConnector> connectors = Collections.emptyList();
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
-          Mover.class.getSimpleName(), MOVER_ID_PATH, conf,
+          Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
           NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
 
       while (connectors.size() > 0) {
@@ -655,6 +656,22 @@ public class Mover {
           NameNodeConnector nnc = iter.next();
           final Mover m = new Mover(nnc, conf, retryCount,
               excludedPinnedBlocks);
+
+          boolean spsRunning;
+          try {
+            spsRunning = nnc.getDistributedFileSystem().getClient()
+                .isStoragePolicySatisfierRunning();
+          } catch (StandbyException e) {
+            System.err.println("Skip Standby Namenode. " + nnc.toString());
+            continue;
+          }
+          if (spsRunning) {
+            System.err.println("Mover failed due to StoragePolicySatisfier"
+                + " is running. Exiting with status "
+                + ExitStatus.SKIPPED_DUE_TO_SPS + "... ");
+            return ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
+          }
+
           final ExitStatus r = m.run();
 
           if (r == ExitStatus.SUCCESS) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index eefea3f..c851b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3769,8 +3769,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
       // TODO: Handle blocks movement results send by the coordinator datanode.
       // This has to be revisited as part of HDFS-11029.
-      blockManager.getStoragePolicySatisfier()
-          .handleBlocksStorageMovementResults(blksMovementResults);
+      if (blockManager.getStoragePolicySatisfier() != null) {
+        blockManager.getStoragePolicySatisfier()
+            .handleBlocksStorageMovementResults(blksMovementResults);
+      }
 
       //create ha status
       final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b9b8b69..1f782e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -2394,4 +2394,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.checkSuperuserPrivilege();
     return Lists.newArrayList(nn.getReconfigurableProperties());
   }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+    checkNNStartup();
+    if (nn.isStandbyState()) {
+      throw new StandbyException("Not supported by Standby Namenode.");
+    }
+    StoragePolicySatisfier sps = namesystem.getBlockManager()
+        .getStoragePolicySatisfier();
+    return sps != null && sps.isRunning();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 617ab2c..cc2ca7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -70,6 +72,7 @@ public class StoragePolicySatisfier implements Runnable {
   private final BlockManager blockManager;
   private final BlockStorageMovementNeeded storageMovementNeeded;
   private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
+  private volatile boolean isRunning = false;
 
   public StoragePolicySatisfier(final Namesystem namesystem,
       final BlockStorageMovementNeeded storageMovementNeeded,
@@ -99,6 +102,7 @@ public class StoragePolicySatisfier implements Runnable {
    * Stop storage policy satisfier demon thread.
    */
   public void stop() {
+    isRunning = false;
     if (storagePolicySatisfierThread == null) {
       return;
     }
@@ -110,8 +114,40 @@ public class StoragePolicySatisfier implements Runnable {
     this.storageMovementsMonitor.stop();
   }
 
+  /**
+   * Check whether StoragePolicySatisfier is running.
+   * @return true if running
+   */
+  public boolean isRunning() {
+    return isRunning;
+  }
+
+  // Return true if a Mover instance is running
+  private boolean checkIfMoverRunning() {
+    boolean ret = false;
+    try {
+      String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
+      INode inode = namesystem.getFSDirectory().getINode(
+          moverId, FSDirectory.DirOp.READ);
+      if (inode != null) {
+        ret = true;
+      }
+    } catch (IOException e) {
+      LOG.info("StoragePolicySatisfier is enabled as no Mover ID file found.");
+      ret = false;
+    }
+    return ret;
+  }
+
   @Override
   public void run() {
+    isRunning = !checkIfMoverRunning();
+    if (!isRunning) {
+      LOG.error("StoragePolicySatisfier thread stopped "
+          + "as Mover ID file " + HdfsServerConstants.MOVER_ID_PATH.toString()
+          + " exists");
+      return;
+    }
     while (namesystem.isRunning()) {
       try {
         Long blockCollectionID = storageMovementNeeded.get();
@@ -123,6 +159,7 @@ public class StoragePolicySatisfier implements Runnable {
         // we want to check block movements.
         Thread.sleep(3000);
       } catch (Throwable t) {
+        isRunning = false;
         if (!namesystem.isRunning()) {
           LOG.info("Stopping StoragePolicySatisfier.");
           if (!(t instanceof InterruptedException)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2a42b5c..176b444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4277,6 +4277,15 @@
   </property>
 
 <property>
+  <name>dfs.storage.policy.satisfier.activate</name>
+  <value>true</value>
+  <description>
+    If true, activate StoragePolicySatisfier.
+    By default, StoragePolicySatisfier is activated.
+  </description>
+</property>
+
+<property>
   <name>dfs.pipeline.ecn</name>
   <value>false</value>
   <description>
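
The same switch is toggled programmatically throughout the test changes that
follow. For reference, a minimal sketch of disabling the satisfier in a test
Configuration, using only the key added above:

    Configuration conf = new HdfsConfiguration();
    // Keep the SPS thread inactive so the Mover-based tests stay in control
    // of block movement.
    conf.setBoolean(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);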

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index 1eb44e0..8e02d41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -67,6 +67,8 @@ public class TestStoragePolicySatisfyWorker {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         1L);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+        true);
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 707d46f..7bcc9c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.http.HttpConfig;
@@ -112,6 +113,8 @@ public class TestMover {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         1L);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
   }
 
   static Mover newMover(Configuration conf) throws IOException {
@@ -123,7 +126,7 @@ public class TestMover {
     }
 
     final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
-        nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
+        nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
         NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
     return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
   }
@@ -131,6 +134,8 @@ public class TestMover {
   @Test
   public void testScheduleSameBlock() throws IOException {
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(4).build();
     try {
@@ -245,8 +250,11 @@ public class TestMover {
    */
   @Test
   public void testMoverCli() throws Exception {
+    final Configuration clusterConf = new HdfsConfiguration();
+    clusterConf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
-        .Builder(new HdfsConfiguration()).numDataNodes(0).build();
+        .Builder(clusterConf).numDataNodes(0).build();
     try {
       final Configuration conf = cluster.getConfiguration(0);
       try {
@@ -278,8 +286,10 @@ public class TestMover {
   @Test
   public void testMoverCliWithHAConf() throws Exception {
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
-        .Builder(new HdfsConfiguration())
+        .Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(0).build();
     HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
@@ -300,11 +310,16 @@ public class TestMover {
 
   @Test
   public void testMoverCliWithFederation() throws Exception {
+    final Configuration clusterConf = new HdfsConfiguration();
+    clusterConf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
-        .Builder(new HdfsConfiguration())
+        .Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
         .numDataNodes(0).build();
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     DFSTestUtil.setFederatedConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -348,11 +363,16 @@ public class TestMover {
 
   @Test
   public void testMoverCliWithFederationHA() throws Exception {
+    final Configuration clusterConf = new HdfsConfiguration();
+    clusterConf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
-        .Builder(new HdfsConfiguration())
+        .Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
         .numDataNodes(0).build();
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -416,6 +436,8 @@ public class TestMover {
   public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
     // HDFS-8147
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
@@ -441,6 +463,36 @@ public class TestMover {
     }
   }
 
+  @Test(timeout = 300000)
+  public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .storageTypes(
+            new StorageType[][] {{StorageType.DISK}, {StorageType.DISK},
+                {StorageType.DISK}}).build();
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final String file = "/testMoveWhenStoragePolicySatisfierIsRunning";
+      // write to DISK
+      final FSDataOutputStream out = dfs.create(new Path(file));
+      out.writeChars("testMoveWhenStoragePolicySatisfierIsRunning");
+      out.close();
+
+      // move to ARCHIVE
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      int rc = ToolRunner.run(conf, new Mover.Cli(),
+          new String[] {"-p", file.toString()});
+      int exitcode = ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
+      Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test
   public void testMoverFailedRetry() throws Exception {
     // HDFS-8147
@@ -491,6 +543,8 @@ public class TestMover {
         1L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
   }
 
   @Test(timeout = 300000)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 764a0db..6ef0361 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -96,6 +96,8 @@ public class TestStorageMover {
     DEFAULT_CONF.setLong(
         DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L);
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
+    DEFAULT_CONF.setBoolean(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
     HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8df299/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 499fe3c..fe23f3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -31,12 +31,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -442,6 +444,27 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests to verify that SPS should not start when a Mover instance
+   * is running.
+   */
+  @Test(timeout = 300000)
+  public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
+      throws IOException {
+    try {
+      // Simulate Mover by creating MOVER_ID file
+      DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
+          HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
+      hdfsCluster.restartNameNode(true);
+      boolean running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertFalse("SPS should not start "
+          + "when a Mover instance is running", running);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
   private void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
       int timeout) throws TimeoutException, InterruptedException {
     BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();



[37/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5909a381
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5909a381
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5909a381

Branch: refs/heads/HDFS-10285
Commit: 5909a381da8da13722ccaeef82251390a1dfc15b
Parents: 0b03ec6
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:46 2017 +0530

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirAttrOp.java       |  91 ------------
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++++++++++++++++++
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   2 +-
 .../hdfs/server/namenode/FSDirectory.java       |  16 --
 .../hdfs/server/namenode/FSNamesystem.java      |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++++++++++-
 .../namenode/TestStoragePolicySatisfier.java    |   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
----------------------------------------------------------------------
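
The underlying flow this fix protects: a satisfyStoragePolicy call persists a
satisfy xattr through the edit log, and a NameNode restart must replay that
xattr without failing and still honour the pending request. A minimal
end-to-end sketch, assuming a MiniDFSCluster (cluster), a DistributedFileSystem
(fs), and a test file and timeout like those used in the tests touched by this
commit:

    // 1. Queue the movement; the satisfy xattr is logged to the edit log.
    fs.setStoragePolicy(testFile, "ONE_SSD");
    fs.satisfyStoragePolicy(testFile);

    // 2. Restart the NameNode so the xattr is reloaded from the edits.
    cluster.restartNameNode(true);

    // 3. The replayed xattr should re-queue the blocks; wait for placement.
    DFSTestUtil.waitExpectedStorageType(
        testFile.toString(), StorageType.SSD, 1, timeout, fs);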


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index e604726..d4b24f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -200,29 +197,6 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String src, boolean logRetryCache) throws IOException {
-
-    FSPermissionChecker pc = fsd.getPermissionChecker();
-    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
-    INodesInPath iip;
-    fsd.writeLock();
-    try {
-
-      // check operation permission.
-      iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-      if (fsd.isPermissionEnabled()) {
-        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-      }
-      XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-      xAttrs.add(satisfyXAttr);
-    } finally {
-      fsd.writeUnlock();
-    }
-    fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-    return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
       throws IOException {
     return bm.getStoragePolicies();
@@ -484,71 +458,6 @@ public class FSDirAttrOp {
     }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-      BlockManager bm, FSDirectory fsd) throws IOException {
-
-    final INode inode = FSDirectory.resolveLastINode(iip);
-    final int snapshotId = iip.getLatestSnapshotId();
-    final List<INode> candidateNodes = new ArrayList<>();
-
-    // TODO: think about optimization here, label the dir instead
-    // of the sub-files of the dir.
-    if (inode.isFile()) {
-      candidateNodes.add(inode);
-    } else if (inode.isDirectory()) {
-      for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-        if (node.isFile()) {
-          candidateNodes.add(node);
-        }
-      }
-    }
-
-    // If node has satisfy xattr, then stop adding it
-    // to satisfy movement queue.
-    if (inodeHasSatisfyXAttr(candidateNodes)) {
-      throw new IOException(
-          "Cannot request to call satisfy storage policy on path "
-          + iip.getPath()
-          + ", as this file/dir was already called for satisfying "
-          + "storage policy.");
-    }
-
-    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
-    final XAttr satisfyXAttr =
-        XAttrHelper.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
-    xattrs.add(satisfyXAttr);
-
-    for (INode node : candidateNodes) {
-      bm.satisfyStoragePolicy(node.getId());
-      List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(node);
-      List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(
-          fsd, existingXAttrs, xattrs, EnumSet.of(XAttrSetFlag.CREATE));
-      XAttrStorage.updateINodeXAttrs(node, newXAttrs, snapshotId);
-    }
-    return satisfyXAttr;
-  }
-
-  private static boolean inodeHasSatisfyXAttr(List<INode> candidateNodes) {
-    // If the node is a directory and one of the child files
-    // has satisfy xattr, then return true for this directory.
-    for (INode inode : candidateNodes) {
-      final XAttrFeature f = inode.getXAttrFeature();
-      if (inode.isFile() &&
-          f != null && f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  static void unprotectedRemoveSPSXAttr(INode inode, XAttr spsXAttr)
-      throws IOException{
-    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
-    existingXAttrs.remove(spsXAttr);
-    XAttrStorage.updateINodeXAttrs(inode, existingXAttrs,
-        INodesInPath.fromINode(inode).getLatestSnapshotId());
-  }
-
   private static void setDirStoragePolicy(
       FSDirectory fsd, INodesInPath iip, byte policyId) throws IOException {
     INode inode = FSDirectory.resolveLastINode(iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
new file mode 100644
index 0000000..81d337f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Helper class to perform storage policy satisfier related operations.
+ */
+final class FSDirSatisfyStoragePolicyOp {
+
+  /**
+   * Private constructor for preventing FSDirSatisfyStoragePolicyOp object
+   * creation. Static-only class.
+   */
+  private FSDirSatisfyStoragePolicyOp() {
+  }
+
+  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
+      String src, boolean logRetryCache) throws IOException {
+
+    assert fsd.getFSNamesystem().hasWriteLock();
+    FSPermissionChecker pc = fsd.getPermissionChecker();
+    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+    INodesInPath iip;
+    fsd.writeLock();
+    try {
+
+      // check operation permission.
+      iip = fsd.resolvePath(pc, src, DirOp.WRITE);
+      if (fsd.isPermissionEnabled()) {
+        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+      }
+      XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
+      xAttrs.add(satisfyXAttr);
+      fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+    } finally {
+      fsd.writeUnlock();
+    }
+    return fsd.getAuditFileInfo(iip);
+  }
+
+  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
+      BlockManager bm, FSDirectory fsd) throws IOException {
+
+    final INode inode = FSDirectory.resolveLastINode(iip);
+    final int snapshotId = iip.getLatestSnapshotId();
+    final List<INode> candidateNodes = new ArrayList<>();
+
+    // TODO: think about optimization here, label the dir instead
+    // of the sub-files of the dir.
+    if (inode.isFile()) {
+      candidateNodes.add(inode);
+    } else if (inode.isDirectory()) {
+      for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
+        if (node.isFile()) {
+          candidateNodes.add(node);
+        }
+      }
+    }
+
+    // If node has satisfy xattr, then stop adding it
+    // to satisfy movement queue.
+    if (inodeHasSatisfyXAttr(candidateNodes)) {
+      throw new IOException(
+          "Cannot request to call satisfy storage policy on path "
+              + iip.getPath()
+              + ", as this file/dir was already called for satisfying "
+              + "storage policy.");
+    }
+
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    final XAttr satisfyXAttr = XAttrHelper
+        .buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
+    xattrs.add(satisfyXAttr);
+
+    for (INode node : candidateNodes) {
+      bm.satisfyStoragePolicy(node.getId());
+      List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(node);
+      List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
+          xattrs, EnumSet.of(XAttrSetFlag.CREATE));
+      XAttrStorage.updateINodeXAttrs(node, newXAttrs, snapshotId);
+    }
+    return satisfyXAttr;
+  }
+
+  private static boolean inodeHasSatisfyXAttr(List<INode> candidateNodes) {
+    // If the node is a directory and one of the child files
+    // has satisfy xattr, then return true for this directory.
+    for (INode inode : candidateNodes) {
+      final XAttrFeature f = inode.getXAttrFeature();
+      if (inode.isFile() && f != null
+          && f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  static void removeSPSXattr(FSDirectory fsd, INode inode, XAttr spsXAttr)
+      throws IOException {
+    try {
+      fsd.writeLock();
+      List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+      existingXAttrs.remove(spsXAttr);
+      XAttrStorage.updateINodeXAttrs(inode, existingXAttrs, INodesInPath
+          .fromINode(inode).getLatestSnapshotId());
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(spsXAttr);
+      fsd.getEditLog().logRemoveXAttrs(inode.getFullPathName(), xAttrs, false);
+    } finally {
+      fsd.writeUnlock();
+    }
+  }
+}
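
For context, a minimal client-side sketch of how the satisfier above gets triggered (this is a sketch, assuming a cluster built from this branch with the storage policy satisfier enabled and ARCHIVE storage configured; the path and policy name are illustrative, and the two calls mirror the ones used in the tests later in this series):

// Minimal client-side sketch (assumption: an HDFS cluster built from this
// branch with the storage policy satisfier enabled and ARCHIVE volumes
// configured). The path and policy name below are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SatisfyStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/data/cold/file1");
    try (FileSystem fs = file.getFileSystem(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Change the policy first; existing replicas stay on their old storages.
      dfs.setStoragePolicy(file, "COLD");
      // Ask the NameNode to queue the block movements; internally this sets
      // the SPS xattr handled by FSDirSatisfyStoragePolicyOp above.
      dfs.satisfyStoragePolicy(file);
    }
  }
}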

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index f21aa17..b73b1ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -287,7 +287,7 @@ class FSDirXAttrOp {
 
       // Add inode id to movement queue if xattrs contain satisfy xattr.
       if (XATTR_SATISFY_STORAGE_POLICY.equals(xaName)) {
-        FSDirAttrOp.unprotectedSatisfyStoragePolicy(iip,
+        FSDirSatisfyStoragePolicyOp.unprotectedSatisfyStoragePolicy(iip,
             fsd.getBlockManager(), fsd);
         continue;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 2a69b11..b8d90f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1362,22 +1362,6 @@ public class FSDirectory implements Closeable {
     getBlockManager().satisfyStoragePolicy(inode.getId());
   }
 
-  /**
-   * Remove the SPS xattr from the inode, retrieve the inode from the
-   * block collection id.
-   * @param id
-   *           - file block collection id.
-   */
-  public void removeSPSXattr(long id) throws IOException {
-    final INode inode = getInode(id);
-    final XAttrFeature xaf = inode.getXAttrFeature();
-    final XAttr spsXAttr = xaf.getXAttr(XATTR_SATISFY_STORAGE_POLICY);
-
-    if (spsXAttr != null) {
-      FSDirAttrOp.unprotectedRemoveSPSXAttr(inode, spsXAttr);
-    }
-  }
-
   private void addEncryptionZone(INodeWithAdditionalFields inode,
       XAttrFeature xaf) {
     if (xaf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3f2a6f8..7751921 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2173,10 +2173,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                 + " by admin. Seek for an admin help to activate it "
                 + "or use Mover tool.");
       }
-      FSDirAttrOp.satisfyStoragePolicy(dir, blockManager, src, logRetryCache);
+      FSDirSatisfyStoragePolicyOp.satisfyStoragePolicy(dir, blockManager, src,
+          logRetryCache);
     } finally {
       writeUnlock();
     }
+    getEditLog().logSync();
   }
 
   /**
@@ -7507,6 +7509,26 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     logAuditEvent(true, operationName, src, null, auditStat);
   }
 
+  @Override
+  public void removeXattr(long id, String xattrName) throws IOException {
+    writeLock();
+    try {
+      final INode inode = dir.getInode(id);
+      final XAttrFeature xaf = inode.getXAttrFeature();
+      if (xaf == null) {
+        return;
+      }
+      final XAttr spsXAttr = xaf.getXAttr(xattrName);
+
+      if (spsXAttr != null) {
+        FSDirSatisfyStoragePolicyOp.removeSPSXattr(dir, inode, spsXAttr);
+      }
+    } finally {
+      writeUnlock("removeXAttr");
+    }
+    getEditLog().logSync();
+  }
+
   void checkAccess(String src, FsAction mode) throws IOException {
     final String operationName = "checkAccess";
     checkOperation(OperationCategory.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index a2b07ca..e58fa72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -52,4 +54,12 @@ public interface Namesystem extends RwLock, SafeMode {
    * @return true if valid write lease exists, otherwise return false.
    */
   boolean isFileOpenedForWrite(String filePath);
+
+  /**
+   * Remove the xAttr with the given name from the inode identified by id.
+   * @param id inode id from which the xAttr should be removed
+   * @param xattrName name of the xAttr to remove
+   * @throws IOException if the xAttr removal fails
+   */
+  void removeXattr(long id, String xattrName) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 3b20314..9e2a4a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -829,6 +831,6 @@ public class StoragePolicySatisfier implements Runnable {
    */
   public void postBlkStorageMovementCleanup(long trackId)
       throws IOException {
-    this.namesystem.getFSDirectory().removeSPSXattr(trackId);
+    this.namesystem.removeXattr(trackId, XATTR_SATISFY_STORAGE_POLICY);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 41c272c..bdf0159 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -32,6 +32,7 @@ import org.junit.Test;
 import java.io.IOException;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.*;
 
 /**
  * Test persistence of satisfying files/directories.
@@ -60,11 +61,9 @@ public class TestPersistentStoragePolicySatisfier {
   private static final String ALL_SSD = "ALL_SSD";
 
   private static StorageType[][] storageTypes = new StorageType[][] {
-      {StorageType.ARCHIVE, StorageType.DISK},
-      {StorageType.DISK, StorageType.SSD},
-      {StorageType.SSD, StorageType.RAM_DISK},
-      {StorageType.ARCHIVE, StorageType.DISK},
-      {StorageType.ARCHIVE, StorageType.SSD}
+      {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD},
+      {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD},
+      {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
   private final int timeout = 300000;
@@ -94,10 +93,13 @@ public class TestPersistentStoragePolicySatisfier {
   private void clusterSetUp(boolean isHAEnabled, Configuration newConf)
       throws Exception {
     conf = newConf;
+    conf.set(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+        "3000");
     final int dnNumber = storageTypes.length;
     final short replication = 3;
     MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
-        .storageTypes(storageTypes)
+        .storageTypes(storageTypes).storagesPerDatanode(3)
         .numDataNodes(dnNumber);
     if (isHAEnabled) {
       clusterBuilder.nnTopology(MiniDFSNNTopology.simpleHATopology());
@@ -277,9 +279,10 @@ public class TestPersistentStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testWithFederationHA() throws Exception {
+    MiniDFSCluster haCluster = null;
     try {
       conf = new HdfsConfiguration();
-      final MiniDFSCluster haCluster = new MiniDFSCluster
+      haCluster = new MiniDFSCluster
           .Builder(conf)
           .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
           .storageTypes(storageTypes)
@@ -305,7 +308,14 @@ public class TestPersistentStoragePolicySatisfier {
           testFileName, StorageType.ARCHIVE, 2, timeout, fs);
 
     } finally {
-      clusterShutdown();
+      if(fs != null) {
+        fs.close();
+        fs = null;
+      }
+      if(haCluster != null) {
+        haCluster.shutdown(true);
+        haCluster = null;
+      }
     }
   }
 
@@ -404,6 +414,70 @@ public class TestPersistentStoragePolicySatisfier {
   }
 
   /**
+   * Test loading of SPS xAttrs from the edits log when satisfyStoragePolicy
+   * is called on a child file and its parent directory.
+   * 1. Create one directory and create one child file.
+   * 2. Set storage policy for the child file and call
+   * satisfyStoragePolicy.
+   * 3. Wait for SPS to remove the xAttr from the child file.
+   * 4. Set storage policy for the parent directory and call
+   * satisfyStoragePolicy.
+   * 5. Restart the namenode.
+   * The NameNode should start successfully.
+   */
+  @Test(timeout = 300000)
+  public void testNameNodeRestartWhenSPSCalledOnChildFileAndParentDir()
+      throws Exception {
+    try {
+      clusterSetUp();
+      fs.setStoragePolicy(childFile, "COLD");
+      fs.satisfyStoragePolicy(childFile);
+      DFSTestUtil.waitExpectedStorageType(childFile.toUri().getPath(),
+          StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
+      // wait for SPS to remove Xattr from file
+      Thread.sleep(30000);
+      fs.setStoragePolicy(childDir, "COLD");
+      fs.satisfyStoragePolicy(childDir);
+      try {
+        cluster.restartNameNodes();
+      } catch (Exception e) {
+        assertFalse(e.getMessage().contains(
+            "Cannot request to call satisfy storage policy"));
+      }
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
+   * Test SPS when satisfyStoragePolicy is called on a child directory and
+   * its parent directory.
+   * 1. Create one parent directory and one child directory.
+   * 2. Create some files in both directories.
+   * 3. Set storage policy for the parent directory and call
+   * satisfyStoragePolicy.
+   * 4. Set storage policy for the child directory and call
+   * satisfyStoragePolicy.
+   * 5. Restart the namenode.
+   * All the file blocks should satisfy the policy.
+   */
+  @Test(timeout = 300000)
+  public void testSPSOnChildAndParentDirectory() throws Exception {
+    try {
+      clusterSetUp();
+      fs.setStoragePolicy(parentDir, "COLD");
+      fs.satisfyStoragePolicy(childDir);
+      fs.satisfyStoragePolicy(parentDir);
+      DFSTestUtil.waitExpectedStorageType(childFileName, StorageType.ARCHIVE,
+          3, 30000, cluster.getFileSystem());
+      DFSTestUtil.waitExpectedStorageType(parentFileName, StorageType.ARCHIVE,
+          3, 30000, cluster.getFileSystem());
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
   * Restart the whole env and trigger the DataNode's heartbeats.
    * @throws Exception
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5909a381/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 8457e5b..fa954b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -855,7 +856,9 @@ public class TestStoragePolicySatisfier {
             {StorageType.DISK, StorageType.SSD}};
 
     int defaultStripedBlockSize =
-        ErasureCodingPolicyManager.getSystemPolicies()[0].getCellSize() * 4;
+        StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
+    config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
     config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[47/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

Posted by ra...@apache.org.
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac4d9b0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac4d9b0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac4d9b0d

Branch: refs/heads/HDFS-10285
Commit: ac4d9b0dbf89de93ec8c1c5e5877adcfd1893a20
Parents: ebb0430
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:30 2017 +0530

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockManager.java       |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java     |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java    |  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ++++---
 .../server/namenode/TestStoragePolicySatisfier.java     | 12 ++++++------
 6 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2a26c35..441677f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -417,7 +417,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
       new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
    * Block movement status code.
    */
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
     /** Success. */
     DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
     /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
     private final int code;
 
-    private BlockMovementStatus(int code) {
+    BlockMovementStatus(int code) {
       this.code = code;
     }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
     private final DatanodeInfo target;
     private final BlockMovementStatus status;
 
-    public BlockMovementResult(long trackId, long blockId,
+    BlockMovementResult(long trackId, long blockId,
         DatanodeInfo target, BlockMovementStatus status) {
       this.trackId = trackId;
       this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-    public StorageType storageType = null;
-    public DatanodeDescriptor dn = null;
+    private StorageType storageType = null;
+    private DatanodeDescriptor dn = null;
 
-    public StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
+    StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
       this.storageType = storageType;
       this.dn = dn;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 892efb3..5e1f148 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -79,9 +79,8 @@ public interface DatanodeProtocol {
   final static int DNA_CACHE = 9;      // cache blocks
   final static int DNA_UNCACHE = 10;   // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
-  final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
-  final static int DNA_DROP_SPS_WORK_COMMAND = 13; // block storage movement
-                                                   // command
+  int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
+  int DNA_DROP_SPS_WORK_COMMAND = 13; // drop sps work command
 
   /** 
    * Register Datanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 7bcc9c2..c4f9bfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -125,9 +125,10 @@ public class TestMover {
       nnMap.put(nn, null);
     }
 
-    final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
-        nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
-        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
+    final List<NameNodeConnector> nncs = NameNodeConnector.
+        newNameNodeConnectors(nnMap, Mover.class.getSimpleName(),
+            HdfsServerConstants.MOVER_ID_PATH, conf,
+            NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
     return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac4d9b0d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 10ceae7..7f96003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -496,8 +496,8 @@ public class TestStoragePolicySatisfier {
       namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
       hdfsCluster.triggerHeartbeats();
 
-      // No block movement will be scheduled as there is no target node available
-      // with the required storage type.
+      // No block movement will be scheduled as there is no target node
+      // available with the required storage type.
       waitForAttemptedItems(1, 30000);
       DFSTestUtil.waitExpectedStorageType(
           file, StorageType.DISK, 3, 30000, dfs);
@@ -1174,14 +1174,14 @@ public class TestStoragePolicySatisfier {
 
   private void startAdditionalDNs(final Configuration conf,
       int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
-      int storagesPerDatanode, long capacity, final MiniDFSCluster cluster)
+      int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
           throws IOException {
     long[][] capacities;
     existingNodesNum += newNodesRequired;
-    capacities = new long[newNodesRequired][storagesPerDatanode];
+    capacities = new long[newNodesRequired][storagesPerDn];
     for (int i = 0; i < newNodesRequired; i++) {
-      for (int j = 0; j < storagesPerDatanode; j++) {
-        capacities[i][j] = capacity;
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
       }
     }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: HDFS-10884: [SPS]: Add block movement tracker to track the completion of block movement future tasks at DN. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-10884: [SPS]: Add block movement tracker to track the completion of block movement future tasks at DN. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5058b09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5058b09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5058b09

Branch: refs/heads/HDFS-10285
Commit: d5058b09f2f5abb725559f9dd26f29a275978243
Parents: 1b7b964
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Tue Oct 25 00:40:45 2016 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:13 2017 +0530

----------------------------------------------------------------------
 .../datanode/BlockStorageMovementTracker.java   | 146 ++++++++++++++
 .../datanode/StoragePolicySatisfyWorker.java    | 178 +++++++++++++----
 .../protocol/BlockStorageMovementCommand.java   |  12 +-
 .../TestStoragePolicySatisfyWorker.java         | 190 +++++++++++++------
 4 files changed, 427 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5058b09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
new file mode 100644
index 0000000..d31f075
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is used to track the completion of block movement future tasks.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class BlockStorageMovementTracker implements Runnable {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(BlockStorageMovementTracker.class);
+  private final CompletionService<BlockMovementResult> moverCompletionService;
+  private final BlocksMovementsCompletionHandler blksMovementscompletionHandler;
+
+  // Keeps the information - trackID vs its list of blocks
+  private final Map<Long, List<Future<BlockMovementResult>>> moverTaskFutures;
+  private final Map<Long, List<BlockMovementResult>> movementResults;
+
+  /**
+   * BlockStorageMovementTracker constructor.
+   *
+   * @param moverCompletionService
+   *          completion service.
+   * @param handler
+   *          blocks movements completion handler
+   */
+  public BlockStorageMovementTracker(
+      CompletionService<BlockMovementResult> moverCompletionService,
+      BlocksMovementsCompletionHandler handler) {
+    this.moverCompletionService = moverCompletionService;
+    this.moverTaskFutures = new HashMap<>();
+    this.blksMovementscompletionHandler = handler;
+    this.movementResults = new HashMap<>();
+  }
+
+  @Override
+  public void run() {
+    while (true) {
+      if (moverTaskFutures.size() <= 0) {
+        try {
+          synchronized (moverTaskFutures) {
+            // Waiting for mover tasks.
+            moverTaskFutures.wait(2000);
+          }
+        } catch (InterruptedException ignore) {
+          // ignore
+        }
+      }
+      try {
+        Future<BlockMovementResult> future = moverCompletionService.take();
+        if (future != null) {
+          BlockMovementResult result = future.get();
+          LOG.debug("Completed block movement. {}", result);
+          long trackId = result.getTrackId();
+          List<Future<BlockMovementResult>> blocksMoving = moverTaskFutures
+              .get(trackId);
+          blocksMoving.remove(future);
+
+          List<BlockMovementResult> resultPerTrackIdList =
+              addMovementResultToTrackIdList(result);
+
+          // Completed all the scheduled blocks movement under this 'trackId'.
+          if (blocksMoving.isEmpty()) {
+            synchronized (moverTaskFutures) {
+              moverTaskFutures.remove(trackId);
+            }
+            // handle completed blocks movements per trackId.
+            blksMovementscompletionHandler.handle(resultPerTrackIdList);
+            movementResults.remove(trackId);
+          }
+        }
+      } catch (ExecutionException | InterruptedException e) {
+        // TODO: Do we need failure retries and implement the same if required.
+        LOG.error("Exception while moving block replica to target storage type",
+            e);
+      }
+    }
+  }
+
+  private List<BlockMovementResult> addMovementResultToTrackIdList(
+      BlockMovementResult result) {
+    long trackId = result.getTrackId();
+    List<BlockMovementResult> perTrackIdList = movementResults.get(trackId);
+    if (perTrackIdList == null) {
+      perTrackIdList = new ArrayList<>();
+      movementResults.put(trackId, perTrackIdList);
+    }
+    perTrackIdList.add(result);
+    return perTrackIdList;
+  }
+
+  /**
+   * Add future task to the tracking list to check the completion status of the
+   * block movement.
+   *
+   * @param trackID
+   *          tracking Id
+   * @param futureTask
+   *          future task used for moving the respective block
+   */
+  void addBlock(long trackID, Future<BlockMovementResult> futureTask) {
+    synchronized (moverTaskFutures) {
+      List<Future<BlockMovementResult>> futures = moverTaskFutures
+          .get(Long.valueOf(trackID));
+      // null for the first task
+      if (futures == null) {
+        futures = new ArrayList<>();
+        moverTaskFutures.put(trackID, futures);
+      }
+      futures.add(futureTask);
+      // Notify waiting tracker thread about the newly added tasks.
+      moverTaskFutures.notify();
+    }
+  }
+}
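
As a side note, the tracking idea above (a consumer draining an ExecutorCompletionService and grouping completed futures by a track id) can be shown with a small stand-alone sketch; the class and result encoding below are simplified illustrations, not HDFS APIs from this patch:

// Stand-alone sketch of the pattern used by BlockStorageMovementTracker:
// tasks are submitted to a CompletionService and a consumer drains the
// completed futures, grouping results by their track id. All names here
// are illustrative and not part of the patch.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class TrackerSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<long[]> moves = new ExecutorCompletionService<>(pool);
    // Submit three dummy "block moves" under track id 1; each result is
    // encoded as {trackId, blockId}.
    int submitted = 3;
    for (int i = 0; i < submitted; i++) {
      final long blockId = i;
      moves.submit(() -> new long[] {1L, blockId});
    }
    // Tracker side: take completed futures and group results per track id.
    Map<Long, List<Long>> perTrack = new HashMap<>();
    for (int i = 0; i < submitted; i++) {
      long[] r = moves.take().get();
      perTrack.computeIfAbsent(r[0], k -> new ArrayList<>()).add(r[1]);
    }
    System.out.println("Completed per track: " + perTrack);
    pool.shutdown();
  }
}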

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5058b09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 2c99963..604fb4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -33,7 +33,6 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
@@ -65,6 +64,8 @@ import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * StoragePolicySatisfyWorker handles the storage policy satisfier commands.
  * These commands would be issued from NameNode as part of Datanode's heart beat
@@ -82,8 +83,10 @@ public class StoragePolicySatisfyWorker {
 
   private final int moverThreads;
   private final ExecutorService moveExecutor;
-  private final CompletionService<Void> moverExecutorCompletionService;
-  private final List<Future<Void>> moverTaskFutures;
+  private final CompletionService<BlockMovementResult> moverCompletionService;
+  private final BlocksMovementsCompletionHandler handler;
+  private final BlockStorageMovementTracker movementTracker;
+  private Daemon movementTrackerThread;
 
   public StoragePolicySatisfyWorker(Configuration conf, DataNode datanode) {
     this.datanode = datanode;
@@ -92,9 +95,13 @@ public class StoragePolicySatisfyWorker {
     moverThreads = conf.getInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
         DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
     moveExecutor = initializeBlockMoverThreadPool(moverThreads);
-    moverExecutorCompletionService = new ExecutorCompletionService<>(
-        moveExecutor);
-    moverTaskFutures = new ArrayList<>();
+    moverCompletionService = new ExecutorCompletionService<>(moveExecutor);
+    handler = new BlocksMovementsCompletionHandler();
+    movementTracker = new BlockStorageMovementTracker(moverCompletionService,
+        handler);
+    movementTrackerThread = new Daemon(movementTracker);
+    movementTrackerThread.setName("BlockStorageMovementTracker");
+    movementTrackerThread.start();
     // TODO: Needs to manage the number of concurrent moves per DataNode.
   }
 
@@ -133,10 +140,6 @@ public class StoragePolicySatisfyWorker {
    * separate thread. Each task will move the block replica to the target node &
    * wait for the completion.
    *
-   * TODO: Presently this function is a blocking call, this has to be refined by
-   * moving the tracking logic to another tracker thread. HDFS-10884 jira
-   * addresses the same.
-   *
    * @param trackID
    *          unique tracking identifier
    * @param blockPoolID
@@ -146,68 +149,64 @@ public class StoragePolicySatisfyWorker {
    */
   public void processBlockMovingTasks(long trackID, String blockPoolID,
       Collection<BlockMovingInfo> blockMovingInfos) {
-    Future<Void> moveCallable = null;
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
       assert blkMovingInfo
           .getSources().length == blkMovingInfo.getTargets().length;
 
       for (int i = 0; i < blkMovingInfo.getSources().length; i++) {
         BlockMovingTask blockMovingTask = new BlockMovingTask(
-            blkMovingInfo.getBlock(), blockPoolID,
+            trackID, blockPoolID, blkMovingInfo.getBlock(),
             blkMovingInfo.getSources()[i], blkMovingInfo.getTargets()[i],
+            blkMovingInfo.getSourceStorageTypes()[i],
             blkMovingInfo.getTargetStorageTypes()[i]);
-        moveCallable = moverExecutorCompletionService.submit(blockMovingTask);
-        moverTaskFutures.add(moveCallable);
-      }
-    }
-
-    for (int i = 0; i < moverTaskFutures.size(); i++) {
-      try {
-        moveCallable = moverExecutorCompletionService.take();
-        moveCallable.get();
-      } catch (InterruptedException | ExecutionException e) {
-        // TODO: Failure retries and report back the error to NameNode.
-        LOG.error("Exception while moving block replica to target storage type",
-            e);
+        Future<BlockMovementResult> moveCallable = moverCompletionService
+            .submit(blockMovingTask);
+        movementTracker.addBlock(trackID, moveCallable);
       }
     }
   }
 
   /**
    * This class encapsulates the process of moving the block replica to the
-   * given target.
+   * given target and wait for the response.
    */
-  private class BlockMovingTask implements Callable<Void> {
+  private class BlockMovingTask implements Callable<BlockMovementResult> {
+    private final long trackID;
+    private final String blockPoolID;
     private final Block block;
     private final DatanodeInfo source;
     private final DatanodeInfo target;
+    private final StorageType srcStorageType;
     private final StorageType targetStorageType;
-    private String blockPoolID;
 
-    BlockMovingTask(Block block, String blockPoolID, DatanodeInfo source,
-        DatanodeInfo target, StorageType targetStorageType) {
-      this.block = block;
+    BlockMovingTask(long trackID, String blockPoolID, Block block,
+        DatanodeInfo source, DatanodeInfo target,
+        StorageType srcStorageType, StorageType targetStorageType) {
+      this.trackID = trackID;
       this.blockPoolID = blockPoolID;
+      this.block = block;
       this.source = source;
       this.target = target;
+      this.srcStorageType = srcStorageType;
       this.targetStorageType = targetStorageType;
     }
 
     @Override
-    public Void call() {
-      moveBlock();
-      return null;
+    public BlockMovementResult call() {
+      BlockMovementStatus status = moveBlock();
+      return new BlockMovementResult(trackID, block.getBlockId(), target,
+          status);
     }
 
-    private void moveBlock() {
-      LOG.info("Start moving block {}", block);
-
-      LOG.debug("Start moving block:{} from src:{} to destin:{} to satisfy "
-          + "storageType:{}", block, source, target, targetStorageType);
+    private BlockMovementStatus moveBlock() {
+      LOG.info("Start moving block:{} from src:{} to destin:{} to satisfy "
+              + "storageType, sourceStoragetype:{} and destinStoragetype:{}",
+          block, source, target, srcStorageType, targetStorageType);
       Socket sock = null;
       DataOutputStream out = null;
       DataInputStream in = null;
       try {
+        ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolID, block);
         DNConf dnConf = datanode.getDnConf();
         String dnAddr = target.getXferAddr(dnConf.getConnectToDnViaHostname());
         sock = datanode.newSocket();
@@ -218,7 +217,6 @@ public class StoragePolicySatisfyWorker {
 
         OutputStream unbufOut = sock.getOutputStream();
         InputStream unbufIn = sock.getInputStream();
-        ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolID, block);
         Token<BlockTokenIdentifier> accessToken = datanode.getBlockAccessToken(
             extendedBlock, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
 
@@ -239,12 +237,14 @@ public class StoragePolicySatisfyWorker {
             "Successfully moved block:{} from src:{} to destin:{} for"
                 + " satisfying storageType:{}",
             block, source, target, targetStorageType);
+        return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
       } catch (IOException e) {
         // TODO: handle failure retries
         LOG.warn(
             "Failed to move block:{} from src:{} to destin:{} to satisfy "
                 + "storageType:{}",
             block, source, target, targetStorageType, e);
+        return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE;
       } finally {
         IOUtils.closeStream(out);
         IOUtils.closeStream(in);
@@ -272,4 +272,102 @@ public class StoragePolicySatisfyWorker {
       DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
     }
   }
+
+  /**
+   * Block movement status code.
+   */
+  enum BlockMovementStatus {
+    /** Success. */
+    DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
+    /**
+     * Failure due to generation time stamp mismatches or network errors
+     * or no available space.
+     */
+    DN_BLK_STORAGE_MOVEMENT_FAILURE(-1);
+
+    // TODO: need to support different type of failures. Failure due to network
+    // errors, block pinned, no space available etc.
+
+    private final int code;
+
+    private BlockMovementStatus(int code) {
+      this.code = code;
+    }
+
+    /**
+     * @return the status code.
+     */
+    int getStatusCode() {
+      return code;
+    }
+  }
+
+  /**
+   * This class represents the result of a block movement task. It holds the
+   * information about whether the task succeeded or failed due to errors.
+   */
+  static class BlockMovementResult {
+    private final long trackId;
+    private final long blockId;
+    private final DatanodeInfo target;
+    private final BlockMovementStatus status;
+
+    public BlockMovementResult(long trackId, long blockId,
+        DatanodeInfo target, BlockMovementStatus status) {
+      this.trackId = trackId;
+      this.blockId = blockId;
+      this.target = target;
+      this.status = status;
+    }
+
+    long getTrackId() {
+      return trackId;
+    }
+
+    long getBlockId() {
+      return blockId;
+    }
+
+    BlockMovementStatus getStatus() {
+      return status;
+    }
+
+    @Override
+    public String toString() {
+      return new StringBuilder().append("Block movement result(\n  ")
+          .append("track id: ").append(trackId).append(" block id: ")
+          .append(blockId).append(" target node: ").append(target)
+          .append(" movement status: ").append(status).append(")").toString();
+    }
+  }
+
+  /**
+   * Blocks movements completion handler, which is used to collect details of
+   * the completed list of block movements and notify the namenode about the
+   * success or failures.
+   */
+  static class BlocksMovementsCompletionHandler {
+    private final List<BlockMovementResult> completedBlocks = new ArrayList<>();
+
+    /**
+     * Collect all the block movement results and notify namenode.
+     *
+     * @param results
+     *          result of all the block movements per trackId
+     */
+    void handle(List<BlockMovementResult> results) {
+      completedBlocks.addAll(results);
+      // TODO: notify namenode about the success/failures.
+    }
+
+    @VisibleForTesting
+    List<BlockMovementResult> getCompletedBlocks() {
+      return completedBlocks;
+    }
+  }
+
+  @VisibleForTesting
+  BlocksMovementsCompletionHandler getBlocksMovementsCompletionHandler() {
+    return handler;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5058b09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
index 7c97f1a..5dcf4e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -35,10 +35,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  * service. After the block movement this DataNode sends response back to the
  * NameNode about the movement status.
  *
- * The coordinator datanode will use 'trackId' identifier to coordinate the block
- * movement of the given set of blocks. TrackId is a unique identifier that
- * represents a group of blocks. Namenode will generate this unique value and
- * send it to the coordinator datanode along with the
+ * The coordinator datanode will use 'trackId' identifier to coordinate the
+ * block movement of the given set of blocks. TrackId is a unique identifier
+ * that represents a group of blocks. Namenode will generate this unique value
+ * and send it to the coordinator datanode along with the
  * BlockStorageMovementCommand. Datanode will monitor the completion of the
  * block movements that grouped under this trackId and notifies Namenode about
  * the completion status.
@@ -153,11 +153,11 @@ public class BlockStorageMovementCommand extends DatanodeCommand {
       return new StringBuilder().append("BlockMovingInfo(\n  ")
           .append("Moving block: ").append(blk).append(" From: ")
           .append(Arrays.asList(sourceNodes)).append(" To: [")
-          .append(Arrays.asList(targetNodes)).append(")\n")
+          .append(Arrays.asList(targetNodes)).append("\n  ")
           .append(" sourceStorageTypes: ")
           .append(Arrays.toString(sourceStorageTypes))
           .append(" targetStorageTypes: ")
-          .append(Arrays.toString(targetStorageTypes)).toString();
+          .append(Arrays.toString(targetStorageTypes)).append(")").toString();
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5058b09/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index d803f1a..ea3eec3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -33,10 +34,15 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,8 +57,9 @@ public class TestStoragePolicySatisfyWorker {
 
   private static final Logger LOG = LoggerFactory
       .getLogger(TestStoragePolicySatisfyWorker.class);
-
   private static final int DEFAULT_BLOCK_SIZE = 100;
+  private MiniDFSCluster cluster = null;
+  private final Configuration conf = new HdfsConfiguration();
 
   private static void initConf(Configuration conf) {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
@@ -63,64 +70,141 @@ public class TestStoragePolicySatisfyWorker {
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
   }
 
+  @Before
+  public void setUp() throws IOException {
+    initConf(conf);
+  }
+
+  @After
+  public void teardown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Tests to verify that the block replica is moving to ARCHIVE storage type to
    * fulfill the storage policy requirement.
    */
   @Test(timeout = 120000)
   public void testMoveSingleBlockToAnotherDatanode() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    initConf(conf);
-    final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(4)
-            .storageTypes(
-                new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
-                    {StorageType.DISK, StorageType.ARCHIVE},
-                    {StorageType.ARCHIVE, StorageType.ARCHIVE},
-                    {StorageType.ARCHIVE, StorageType.ARCHIVE}})
-            .build();
-    try {
-      cluster.waitActive();
-      final DistributedFileSystem dfs = cluster.getFileSystem();
-      final String file = "/testMoveSingleBlockToAnotherDatanode";
-      // write to DISK
-      final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
-      out.writeChars("testMoveSingleBlockToAnotherDatanode");
-      out.close();
-
-      // verify before movement
-      LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      StorageType[] storageTypes = lb.getStorageTypes();
-      for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.DISK == storageType);
-      }
-      // move to ARCHIVE
-      dfs.setStoragePolicy(new Path(file), "COLD");
-
-      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      DataNode src = cluster.getDataNodes().get(3);
-      DatanodeInfo targetDnInfo = DFSTestUtil
-          .getLocalDatanodeInfo(src.getXferPort());
-
-      // TODO: Need to revisit this when NN is implemented to be able to send
-      // block moving commands.
-      StoragePolicySatisfyWorker worker = new StoragePolicySatisfyWorker(conf,
-          src);
-      List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
-      BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
-          lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
-          lb.getStorageTypes()[0], StorageType.ARCHIVE);
-      blockMovingInfos.add(blockMovingInfo);
-      INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
-      worker.processBlockMovingTasks(inode.getId(),
-          cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
-      cluster.triggerHeartbeats();
-
-      // Wait till NameNode notified about the block location details
-      waitForLocatedBlockWithArchiveStorageType(dfs, file, 1, 30000);
-    } finally {
-      cluster.shutdown();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
+        .storageTypes(
+            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.ARCHIVE, StorageType.ARCHIVE},
+                {StorageType.ARCHIVE, StorageType.ARCHIVE}})
+        .build();
+    cluster.waitActive();
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final String file = "/testMoveSingleBlockToAnotherDatanode";
+    // write to DISK
+    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
+    out.writeChars("testMoveSingleBlockToAnotherDatanode");
+    out.close();
+
+    // verify before movement
+    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+    StorageType[] storageTypes = lb.getStorageTypes();
+    for (StorageType storageType : storageTypes) {
+      Assert.assertTrue(StorageType.DISK == storageType);
+    }
+    // move to ARCHIVE
+    dfs.setStoragePolicy(new Path(file), "COLD");
+
+    FSNamesystem namesystem = cluster.getNamesystem();
+    INode inode = namesystem.getFSDirectory().getINode(file);
+    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+
+    cluster.triggerHeartbeats();
+
+    // Wait till NameNode notified about the block location details
+    waitForLocatedBlockWithArchiveStorageType(dfs, file, 2, 30000);
+  }
+
+  /**
+   * Test to verify that the satisfy worker can't move blocks if the specified
+   * target datanode doesn't have enough space to accommodate the moving block.
+   */
+  @Test(timeout = 120000)
+  public void testMoveWithNoSpaceAvailable() throws Exception {
+    final long capacity = 150;
+    final String rack0 = "/rack0";
+    final String rack1 = "/rack1";
+    long[] capacities = new long[] {capacity, capacity, capacity / 2};
+    String[] hosts = {"host0", "host1", "host2"};
+    String[] racks = {rack0, rack1, rack0};
+    int numOfDatanodes = capacities.length;
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes)
+        .hosts(hosts).racks(racks).simulatedCapacities(capacities)
+        .storageTypes(
+            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.ARCHIVE, StorageType.ARCHIVE}})
+        .build();
+
+    cluster.waitActive();
+    InetSocketAddress[] favoredNodes = new InetSocketAddress[3];
+    for (int i = 0; i < favoredNodes.length; i++) {
+      // DFSClient will attempt reverse lookup. In case it resolves
+      // "127.0.0.1" to "localhost", we manually specify the hostname.
+      favoredNodes[i] = cluster.getDataNodes().get(i).getXferAddress();
+    }
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final String file = "/testMoveWithNoSpaceAvailable";
+    DFSTestUtil.createFile(dfs, new Path(file), false, 1024, 100,
+        DEFAULT_BLOCK_SIZE, (short) 2, 0, false, favoredNodes);
+
+    // verify before movement
+    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+    StorageType[] storageTypes = lb.getStorageTypes();
+    for (StorageType storageType : storageTypes) {
+      Assert.assertTrue(StorageType.DISK == storageType);
     }
+
+    // move to ARCHIVE
+    dfs.setStoragePolicy(new Path(file), "COLD");
+
+    lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+    DataNode src = cluster.getDataNodes().get(2);
+    DatanodeInfo targetDnInfo = DFSTestUtil
+        .getLocalDatanodeInfo(src.getXferPort());
+
+    StoragePolicySatisfyWorker worker = new StoragePolicySatisfyWorker(conf,
+        src);
+    List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+    BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
+        lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
+        lb.getStorageTypes()[0], StorageType.ARCHIVE);
+    blockMovingInfos.add(blockMovingInfo);
+    INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
+    worker.processBlockMovingTasks(inode.getId(),
+        cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
+
+    waitForBlockMovementCompletion(worker, inode.getId(), 1, 30000);
+  }
+
+  private void waitForBlockMovementCompletion(
+      final StoragePolicySatisfyWorker worker, final long inodeId,
+      int expectedFailedItemsCount, int timeout) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        List<BlockMovementResult> completedBlocks = worker
+            .getBlocksMovementsCompletionHandler().getCompletedBlocks();
+        int failedCount = 0;
+        for (BlockMovementResult blockMovementResult : completedBlocks) {
+          if (BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE ==
+              blockMovementResult.getStatus()) {
+            failedCount++;
+          }
+        }
+        LOG.info("Block movement completed count={}, expected={} and actual={}",
+            completedBlocks.size(), expectedFailedItemsCount, failedCount);
+        return expectedFailedItemsCount == failedCount;
+      }
+    }, 100, timeout);
   }
 
   private void waitForLocatedBlockWithArchiveStorageType(
@@ -150,7 +234,7 @@ public class TestStoragePolicySatisfyWorker {
     }, 100, timeout);
   }
 
-  BlockMovingInfo prepareBlockMovingInfo(Block block,
+  private BlockMovingInfo prepareBlockMovingInfo(Block block,
       DatanodeInfo src, DatanodeInfo destin, StorageType storageType,
       StorageType targetStorageType) {
     return new BlockMovingInfo(block, new DatanodeInfo[] {src},


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

Posted by ra...@apache.org.
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5704935
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5704935
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5704935

Branch: refs/heads/HDFS-10285
Commit: f57049359a62d5a3d9b63fbba34d1a87e5c87930
Parents: 1e17983
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 12:03:39 2017 +0530

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/bin/hdfs               |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java    | 104 +++++++++++--------
 .../BlockStorageMovementAttemptedItems.java     |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml         |  10 +-
 .../src/site/markdown/ArchivalStorage.md        |  14 +--
 .../src/site/markdown/HDFSCommands.md           |   2 +-
 .../TestStoragePolicySatisfyWorker.java         |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java     |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java     |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java       |  99 ++++++++++++------
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java    |  35 +++++--
 .../TestStoragePolicySatisfierWithHA.java       |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index e6405b5..2c1c6ec 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -59,7 +59,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
+  hadoop_add_subcommand "storagepolicies" admin "list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d8c53ab..2aebb6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -541,10 +541,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-      "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-      true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+      "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+      false;
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
       "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 441677f..3b185cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -419,6 +419,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
+  private final boolean storagePolicyEnabled;
+  private boolean spsEnabled;
   private final BlockStorageMovementNeeded storageMovementNeeded =
       new BlockStorageMovementNeeded();
 
@@ -427,15 +429,9 @@ public class BlockManager implements BlockStatsMXBean {
    */
   private final short minReplicationToBeInMaintenance;
 
-  /**
-   * Whether HA is enabled.
-   */
-  private final boolean haEnabled;
-
   public BlockManager(final Namesystem namesystem, boolean haEnabled,
       final Configuration conf) throws IOException {
     this.namesystem = namesystem;
-    this.haEnabled = haEnabled;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
     this.blockIdManager = new BlockIdManager(this);
@@ -463,24 +459,15 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
         * 1000L);
 
-    final boolean storagePolicyEnabled =
+    storagePolicyEnabled =
         conf.getBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
             DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
-    final boolean spsEnabled =
+    spsEnabled =
         conf.getBoolean(
-            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
-            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
-    if (storagePolicyEnabled && spsEnabled) {
-      sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-          conf);
-    } else {
-      sps = null;
-      LOG.warn(
-          "Failed to start StoragePolicySatisfier"
-              + " since {} set to {} and {} set to {}.",
-          DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, spsEnabled);
-    }
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
+    sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
+        conf);
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
     this.maxCorruptFilesReturned = conf.getInt(
@@ -700,15 +687,10 @@ public class BlockManager implements BlockStatsMXBean {
     this.blockReportThread.start();
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
     bmSafeMode.activate(blockTotal);
-    if (sps != null && !haEnabled) {
-      sps.start(false);
-    }
   }
 
   public void close() {
-    if (sps != null) {
-      sps.deactivate(false);
-    }
+    stopSPS(false);
     bmSafeMode.close();
     try {
       redundancyThread.interrupt();
@@ -4910,46 +4892,86 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Activate the storage policy satisfier by starting its service.
+   * Start storage policy satisfier service.
    */
-  public void activateSPS() {
-    if (sps == null) {
-      LOG.info("Storage policy satisfier is not initialized.");
+  public void startSPS() {
+    if (!(storagePolicyEnabled && spsEnabled)) {
+      LOG.info(
+        "Failed to start StoragePolicySatisfier"
+              + " as {} set to {} and {} set to {}.",
+          DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled,
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, spsEnabled);
       return;
     } else if (sps.isRunning()) {
       LOG.info("Storage policy satisfier is already running.");
       return;
     }
 
-    sps.start(true);
+    sps.start(false);
   }
 
   /**
-   * Deactivate the storage policy satisfier by stopping its services.
+   * Stop storage policy satisfier service.
+   *
+   * @param forceStop
+   *          true means the SPS service should be stopped and all pending
+   *          SPS work cleared
    */
-  public void deactivateSPS() {
-    if (sps == null) {
-      LOG.info("Storage policy satisfier is not initialized.");
+  public void stopSPS(boolean forceStop) {
+    if (!(storagePolicyEnabled && spsEnabled)) {
+      LOG.info("Storage policy satisfier is not enabled.");
       return;
     } else if (!sps.isRunning()) {
       LOG.info("Storage policy satisfier is already stopped.");
       return;
     }
-    sps.deactivate(true);
+
+    sps.disable(forceStop);
+  }
+
+  /**
+   * Enable storage policy satisfier by starting its service.
+   */
+  public void enableSPS() {
+    if (!storagePolicyEnabled){
+      LOG.info("Failed to start StoragePolicySatisfier as {} set to {}.",
+          DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled);
+      return;
+    }
+    spsEnabled = true;
+    if (sps.isRunning()) {
+      LOG.info("Storage policy satisfier is already running.");
+      return;
+    }
+
+    sps.start(true);
+  }
+
+  /**
+   * Disable the storage policy satisfier by stopping its services.
+   */
+  public void disableSPS() {
+    spsEnabled = false;
+    if (!sps.isRunning()) {
+      LOG.info("Storage policy satisfier is already stopped.");
+      return;
+    }
+
+    LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
+        + "stop it.");
+    sps.disable(true);
   }
 
   /**
    * Timed wait to stop storage policy satisfier daemon threads.
    */
   public void stopSPSGracefully() {
-    if (sps != null) {
-      sps.stopGracefully();
-    }
+    sps.stopGracefully();
   }
   /**
    * @return True if storage policy satisfier running.
    */
   public boolean isStoragePolicySatisfierRunning() {
-    return sps == null ? false : sps.isRunning();
+    return sps.isRunning();
   }
 }
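
For readers following the rename, the net effect of the BlockManager changes is that the satisfier object is always constructed but only started when both the storage policy feature and the satisfier flag are on. Below is a minimal stand-alone sketch of that gating, assuming only hadoop-common's Configuration on the classpath; the class name SpsGatingSketch is illustrative and not part of the patch.

    import org.apache.hadoop.conf.Configuration;

    /** Illustrative sketch of the start/enable gating added to BlockManager. */
    public class SpsGatingSketch {
      private final boolean storagePolicyEnabled;
      private boolean spsEnabled;

      public SpsGatingSketch(Configuration conf) {
        // Same keys and defaults the patch introduces/uses.
        storagePolicyEnabled =
            conf.getBoolean("dfs.storage.policy.enabled", true);
        spsEnabled =
            conf.getBoolean("dfs.storage.policy.satisfier.enabled", false);
      }

      /** Mirrors startSPS(): the satisfier runs only when both flags are true. */
      public boolean shouldStartSatisfier() {
        return storagePolicyEnabled && spsEnabled;
      }

      /** Mirrors enableSPS(): the admin can flip only the satisfier flag. */
      public void enable() {
        if (storagePolicyEnabled) {
          spsEnabled = true;
        }
      }
    }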

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 6048986..37833e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -136,7 +136,7 @@ public class BlockStorageMovementAttemptedItems {
    * Sets running flag to false. Also, this will interrupt monitor thread and
    * clear all the queued up tasks.
    */
-  public synchronized void deactivate() {
+  public synchronized void stop() {
     monitorRunning = false;
     if (timerThread != null) {
       timerThread.interrupt();
@@ -152,7 +152,7 @@ public class BlockStorageMovementAttemptedItems {
       return;
     }
     if (monitorRunning) {
-      deactivate();
+      stop();
     }
     try {
       timerThread.join(3000);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7751921..57bfa2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1275,7 +1275,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             edekCacheLoaderDelay, edekCacheLoaderInterval);
       }
 
-      blockManager.activateSPS();
+      blockManager.startSPS();
     } finally {
       startingActiveService = false;
       blockManager.checkSafeMode();
@@ -1306,7 +1306,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     writeLock();
     try {
       if (blockManager != null) {
-        blockManager.deactivateSPS();
+        blockManager.stopSPS(true);
       }
       stopSecretManager();
       leaseManager.stopMonitor();
@@ -2152,6 +2152,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void satisfyStoragePolicy(String src, boolean logRetryCache)
       throws IOException {
+    final String operationName = "satisfyStoragePolicy";
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2169,16 +2171,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           || !blockManager.getStoragePolicySatisfier().isRunning()) {
         throw new UnsupportedActionException(
             "Cannot request to satisfy storage policy "
-                + "when storage policy satisfier feature has been deactivated"
-                + " by admin. Seek for an admin help to activate it "
+                + "when storage policy satisfier feature has been disabled"
+                + " by admin. Seek for an admin help to enable it "
                 + "or use Mover tool.");
       }
-      FSDirSatisfyStoragePolicyOp.satisfyStoragePolicy(dir, blockManager, src,
-          logRetryCache);
+      auditStat = FSDirSatisfyStoragePolicyOp.satisfyStoragePolicy(
+          dir, blockManager, src, logRetryCache);
+    } catch (AccessControlException e) {
+      logAuditEvent(false, operationName, src);
+      throw e;
     } finally {
-      writeUnlock();
+      writeUnlock(operationName);
     }
     getEditLog().logSync();
+    logAuditEvent(true, operationName, src, null, auditStat);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7cc46f2..b7d03e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -157,7 +157,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAUL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE;
@@ -290,7 +290,7 @@ public class NameNode extends ReconfigurableBase implements
           DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
           FS_PROTECTED_DIRECTORIES,
           HADOOP_CALLER_CONTEXT_ENABLED_KEY,
-          DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY));
+          DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2036,8 +2036,8 @@ public class NameNode extends ReconfigurableBase implements
       return reconfCallerContextEnabled(newVal);
     } else if (property.equals(ipcClientRPCBackoffEnable)) {
       return reconfigureIPCBackoffEnabled(newVal);
-    } else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY)) {
-      return reconfigureSPSActivate(newVal, property);
+    } else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY)) {
+      return reconfigureSPSEnabled(newVal, property);
     } else {
       throw new ReconfigurationException(property, newVal, getConf().get(
           property));
@@ -2121,29 +2121,29 @@ public class NameNode extends ReconfigurableBase implements
     return Boolean.toString(clientBackoffEnabled);
   }
 
-  String reconfigureSPSActivate(String newVal, String property)
+  String reconfigureSPSEnabled(String newVal, String property)
       throws ReconfigurationException {
     if (newVal == null || !(newVal.equalsIgnoreCase(Boolean.TRUE.toString())
         || newVal.equalsIgnoreCase(Boolean.FALSE.toString()))) {
       throw new ReconfigurationException(property, newVal,
           getConf().get(property),
           new HadoopIllegalArgumentException(
-              "For activating or deactivating storage policy satisfier, "
+              "For enabling or disabling storage policy satisfier, "
                   + "we must pass true/false only"));
     }
 
     if (!isActiveState()) {
       throw new ReconfigurationException(property, newVal,
           getConf().get(property), new HadoopIllegalArgumentException(
-          "Activating or deactivating storage policy satisfier service on "
+          "Enabling or disabling storage policy satisfier service on "
               + state + " NameNode is not allowed"));
     }
 
-    boolean activateSPS = Boolean.parseBoolean(newVal);
-    if (activateSPS) {
-      namesystem.getBlockManager().activateSPS();
+    boolean enableSPS = Boolean.parseBoolean(newVal);
+    if (enableSPS) {
+      namesystem.getBlockManager().enableSPS();
     } else {
-      namesystem.getBlockManager().deactivateSPS();
+      namesystem.getBlockManager().disableSPS();
     }
     return newVal;
   }
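
The reconfiguration hook above only accepts the literal strings "true" and "false", and only on an active NameNode. A short hedged sketch of driving it programmatically follows, assuming a NameNode handle like the one MiniDFSCluster exposes in the TestNameNodeReconfigure changes further down in this patch; the helper class name is illustrative.

    import org.apache.hadoop.conf.ReconfigurationException;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public final class SpsReconfigSketch {
      private SpsReconfigSketch() {
      }

      /**
       * Toggle the satisfier on a running, active NameNode. Any value other
       * than "true"/"false", or calling this on a standby NameNode, is
       * rejected by reconfigureSPSEnabled() with a ReconfigurationException.
       */
      public static void toggleSps(NameNode nameNode, boolean enable)
          throws ReconfigurationException {
        nameNode.reconfigureProperty(
            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
            Boolean.toString(enable));
      }
    }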

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index af3b7f2..3165813 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -138,7 +138,7 @@ public class StoragePolicySatisfier implements Runnable {
     }
     if (reconfigStart) {
       LOG.info("Starting StoragePolicySatisfier, as admin requested to "
-          + "activate it.");
+          + "start it.");
     } else {
       LOG.info("Starting StoragePolicySatisfier.");
     }
@@ -154,23 +154,21 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   /**
-   * Deactivates storage policy satisfier by stopping its services.
+   * Disables storage policy satisfier by stopping its services.
    *
-   * @param reconfig
-   *          true represents deactivating SPS service as requested by admin,
-   *          false otherwise
+   * @param forceStop
+   *          true means the SPS service should be stopped and all pending
+   *          SPS work cleared
    */
-  public synchronized void deactivate(boolean reconfig) {
+  public synchronized void disable(boolean forceStop) {
     isRunning = false;
     if (storagePolicySatisfierThread == null) {
       return;
     }
 
     storagePolicySatisfierThread.interrupt();
-    this.storageMovementsMonitor.deactivate();
-    if (reconfig) {
-      LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
-          + "deactivate it.");
+    this.storageMovementsMonitor.stop();
+    if (forceStop) {
       this.clearQueuesWithNotification();
       addDropSPSWorkCommandsToAllDNs();
     } else {
@@ -183,7 +181,7 @@ public class StoragePolicySatisfier implements Runnable {
    */
   public synchronized void stopGracefully() {
     if (isRunning) {
-      deactivate(true);
+      disable(true);
     }
     this.storageMovementsMonitor.stopGracefully();
     if (storagePolicySatisfierThread == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
index b484eb1..7f749ec4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
@@ -40,7 +40,7 @@ public class BlocksStorageMovementResult {
    * IN_PROGRESS - If all or some of the blocks associated to track id are
    * still moving.
    */
-  public static enum Status {
+  public enum Status {
     SUCCESS, FAILURE, IN_PROGRESS;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 30420ad..9b57874 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -233,8 +233,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
   }
 
   /** Command to schedule blocks to move based on specified policy. */
-  private static class SatisfyStoragePolicyCommand implements
-      AdminHelper.Command {
+  private static class SatisfyStoragePolicyCommand
+      implements AdminHelper.Command {
     @Override
     public String getName() {
       return "-satisfyStoragePolicy";
@@ -278,10 +278,11 @@ public class StoragePolicyAdmin extends Configured implements Tool {
   }
 
   /** Command to check storage policy satisfier status. */
-  private static class IsSPSRunningCommand implements AdminHelper.Command {
+  private static class IsSatisfierRunningCommand
+      implements AdminHelper.Command {
     @Override
     public String getName() {
-      return "-isSPSRunning";
+      return "-isSatisfierRunning";
     }
 
     @Override
@@ -370,6 +371,6 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       new GetStoragePolicyCommand(),
       new UnsetStoragePolicyCommand(),
       new SatisfyStoragePolicyCommand(),
-      new IsSPSRunningCommand()
+      new IsSatisfierRunningCommand()
   };
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 356540c..48b913c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4277,13 +4277,13 @@
   </property>
 
 <property>
-  <name>dfs.storage.policy.satisfier.activate</name>
-  <value>true</value>
+  <name>dfs.storage.policy.satisfier.enabled</name>
+  <value>false</value>
   <description>
     If true, StoragePolicySatisfier will be started along with active namenode.
-    By default, StoragePolicySatisfier is activated.
-    Administrator can dynamically activate or deactivate StoragePolicySatisfier by using reconfiguration option.
-    Dynamic activation/deactivation option can be achieved in the following way.
+    By default, StoragePolicySatisfier is disabled.
+    The administrator can dynamically enable or disable StoragePolicySatisfier by using the reconfiguration option.
+    Dynamic enabling/disabling can be achieved in the following way.
     1. Edit/update this configuration property values in hdfs-site.xml
     2. Execute the reconfig command on hadoop command line prompt.
        For example:$hdfs -reconfig namenode nn_host:port start
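
Since the default flips to false here, code that wants SPS now has to opt in explicitly. A minimal hedged Java snippet, mirroring what the test changes later in this patch do before spinning up a cluster (the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class EnableSpsByConf {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // SPS is now off by default; enable it before starting the NameNode
        // (or a MiniDFSCluster in tests).
        conf.setBoolean(
            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
        System.out.println("SPS enabled: " + conf.getBoolean(
            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
      }
    }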

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 668bb20..87817cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -106,7 +106,7 @@ Following 2 options will allow users to move the blocks based on new policy set.
 When user changes the storage policy on a file/directory, user can call `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the new policy set.
 The SPS daemon thread runs along with namenode and periodically scans for the storage mismatches between new policy set and the physical blocks placed. This will only track the files/directories for which user invoked satisfyStoragePolicy. If SPS identifies some blocks to be moved for a file, then it will schedule block movement tasks to datanodes. A Coordinator DataNode(C-DN) will track all block movements associated to a file and notify to namenode about movement success/failure. If there are any failures in movement, the SPS will re-attempt by sending new block movement task.
 
-SPS can be activated and deactivated dynamically without restarting the Namenode.
+SPS can be enabled and disabled dynamically without restarting the Namenode.
 
 Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
 
@@ -123,8 +123,8 @@ Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HD
 
 ####Configurations:
 
-*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate SPS. Configuring true represents SPS is
-   activated and vice versa.
+*   **dfs.storage.policy.satisfier.enabled** - Used to enable or disable SPS. Setting this to true enables SPS and
+   setting it to false disables it.
 
 *   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to re-check the processed block storage movement
    command results from Co-ordinator Datanode.
@@ -151,7 +151,7 @@ Note that, when both -p and -f options are omitted, the default path is the root
 
 ####Administrator notes:
 
-`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be deactivated while starting. In that case, administrator should make sure, Mover execution finished and then activate SPS again. Similarly when SPS activated already, Mover cannot be run. If administrator is looking to run Mover tool explicitly, then he/she should make sure to deactivate SPS first and then run Mover. Please look at the commands section to know how to activate or deactivate SPS dynamically.
+`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, the administrator should make sure the Mover execution has finished and then enable SPS again. Similarly, when SPS is already enabled, Mover cannot be run. If the administrator wants to run the Mover tool explicitly, he/she should disable SPS first and then run Mover. Please refer to the commands section to learn how to enable or disable SPS dynamically.
 
 Storage Policy Commands
 -----------------------
@@ -230,10 +230,10 @@ Check the running status of Storage Policy Satisfier in namenode. If it is runni
 
 * Command:
 
-        hdfs storagepolicies -isSPSRunning
+        hdfs storagepolicies -isSatisfierRunning
 
-### Activate or Deactivate SPS without restarting Namenode
-If administrator wants to activate or deactivate SPS feature while Namenode is running, first he/she needs to update the desired value(true or false) for the configuration item `dfs.storage.policy.satisfier.activate` in configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
+### Enable or Disable SPS without restarting Namenode
+If the administrator wants to enable or disable the SPS feature while the Namenode is running, he/she first needs to update the desired value (true or false) for the configuration item `dfs.storage.policy.satisfier.enabled` in the configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
 
 +       hdfs dfsadmin -reconfig namenode <host:ipc_port> start
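
The documentation above pairs setStoragePolicy with the new satisfyStoragePolicy call. Below is a hedged client-side sketch matching how the tests in this patch invoke it; the path is illustrative, and it assumes fs.defaultFS points at a cluster built from this branch with SPS enabled.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SatisfyStoragePolicyClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        Path file = new Path("/archive/data/file1");  // illustrative path
        dfs.setStoragePolicy(file, "COLD");
        // Asks the NameNode-side satisfier to schedule the block moves; the
        // NameNode rejects the call (seen client-side as a RemoteException,
        // as in the tests below) when SPS has been disabled by the admin.
        dfs.satisfyStoragePolicy(file);
      }
    }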
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index f5ad187..29cdf37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -577,7 +577,7 @@ Usage:
           [-getStoragePolicy -path <path>]
           [-unsetStoragePolicy -path <path>]
           [-satisfyStoragePolicy -path <path>]
-          [-isSPSRunning]
+          [-isSatisfierRunning]
           [-help <command-name>]
 
 Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index 8fbbf33..402d4d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -69,7 +69,7 @@ public class TestStoragePolicySatisfyWorker {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         1L);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
-    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index c4f9bfa..2b9c186 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -114,7 +114,7 @@ public class TestMover {
         1L);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
   }
 
   static Mover newMover(Configuration conf) throws IOException {
@@ -136,7 +136,7 @@ public class TestMover {
   public void testScheduleSameBlock() throws IOException {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(4).build();
     try {
@@ -253,7 +253,7 @@ public class TestMover {
   public void testMoverCli() throws Exception {
     final Configuration clusterConf = new HdfsConfiguration();
     clusterConf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
         .Builder(clusterConf).numDataNodes(0).build();
     try {
@@ -288,7 +288,7 @@ public class TestMover {
   public void testMoverCliWithHAConf() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
         .Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
@@ -313,14 +313,14 @@ public class TestMover {
   public void testMoverCliWithFederation() throws Exception {
     final Configuration clusterConf = new HdfsConfiguration();
     clusterConf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
         .Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
         .numDataNodes(0).build();
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     DFSTestUtil.setFederatedConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -366,14 +366,14 @@ public class TestMover {
   public void testMoverCliWithFederationHA() throws Exception {
     final Configuration clusterConf = new HdfsConfiguration();
     clusterConf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster
         .Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
         .numDataNodes(0).build();
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
@@ -438,7 +438,7 @@ public class TestMover {
     // HDFS-8147
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
@@ -468,7 +468,7 @@ public class TestMover {
   public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, true);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
@@ -545,7 +545,7 @@ public class TestMover {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
     conf.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
   }
 
   @Test(timeout = 300000)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 6ef0361..9d741ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -97,7 +97,7 @@ public class TestStorageMover {
         DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L);
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
     DEFAULT_CONF.setBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false);
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
     HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
index 8c7d982..04a63ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -47,7 +47,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @After
   public void teardown() {
     if (bsmAttemptedItems != null) {
-      bsmAttemptedItems.deactivate();
+      bsmAttemptedItems.stop();
       bsmAttemptedItems.stopGracefully();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 3e7f2e3..b4a461a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -44,8 +45,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
 
@@ -223,63 +224,99 @@ public class TestNameNodeReconfigure {
   }
 
   /**
-   * Tests activate/deactivate Storage Policy Satisfier dynamically.
+   * Tests enabling/disabling the Storage Policy Satisfier dynamically when
+   * the "dfs.storage.policy.enabled" feature is disabled.
+   *
+   * @throws ReconfigurationException
+   * @throws IOException
    */
   @Test(timeout = 30000)
-  public void testReconfigureStoragePolicySatisfierActivated()
+  public void testReconfigureSPSWithStoragePolicyDisabled()
+      throws ReconfigurationException, IOException {
+    // shutdown cluster
+    cluster.shutdown();
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+
+    final NameNode nameNode = cluster.getNameNode();
+    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false);
+
+    // enable SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        "true");
+
+    // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
+    assertEquals("SPS shouldn't start as "
+        + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
+            nameNode.getNamesystem().getBlockManager()
+            .isStoragePolicySatisfierRunning());
+
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
+        true, nameNode.getConf()
+            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+            DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT));
+  }
+
+  /**
+   * Tests enable/disable Storage Policy Satisfier dynamically.
+   */
+  @Test(timeout = 30000)
+  public void testReconfigureStoragePolicySatisfierEnabled()
       throws ReconfigurationException {
     final NameNode nameNode = cluster.getNameNode();
 
-    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
-        true);
+    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        false);
     // try invalid values
     try {
-      nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+      nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
           "text");
       fail("ReconfigurationException expected");
     } catch (ReconfigurationException e) {
       GenericTestUtils.assertExceptionContains(
-          "For activating or deactivating storage policy satisfier, "
+          "For enabling or disabling storage policy satisfier, "
               + "we must pass true/false only",
           e.getCause());
     }
 
     // enable SPS
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         "true");
 
-    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
 
     // disable SPS
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         "false");
-    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         false);
 
-    // revert to default
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    // enable SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         "true");
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
         true, nameNode.getNamesystem().getBlockManager()
             .isStoragePolicySatisfierRunning());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
         true, nameNode.getConf()
-            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
+            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
   }
 
   /**
-   * Test to satisfy storage policy after deactivating storage policy satisfier.
+   * Test to satisfy storage policy after the storage policy satisfier is disabled.
    */
   @Test(timeout = 30000)
-  public void testSatisfyStoragePolicyAfterSatisfierDeactivated()
+  public void testSatisfyStoragePolicyAfterSatisfierDisabled()
       throws ReconfigurationException, IOException {
     final NameNode nameNode = cluster.getNameNode();
 
-    // deactivate SPS
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    // disable SPS
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         "false");
-    verifySPSActivated(nameNode, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         false);
 
     Path filePath = new Path("/testSPS");
@@ -288,32 +325,32 @@ public class TestNameNodeReconfigure {
     fileSystem.setStoragePolicy(filePath, "COLD");
     try {
       fileSystem.satisfyStoragePolicy(filePath);
-      fail("Expected to fail, as storage policy feature has deactivated.");
+      fail("Expected to fail, as the storage policy satisfier feature is disabled.");
     } catch (RemoteException e) {
       GenericTestUtils
           .assertExceptionContains("Cannot request to satisfy storage policy "
-              + "when storage policy satisfier feature has been deactivated"
-              + " by admin. Seek for an admin help to activate it "
+              + "when storage policy satisfier feature has been disabled"
+              + " by admin. Seek for an admin help to enable it "
               + "or use Mover tool.", e);
     }
 
     // revert to default
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
+    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         "true");
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
         true, nameNode.getNamesystem().getBlockManager()
             .isStoragePolicySatisfierRunning());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY + " has wrong value",
+    assertEquals(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY + " has wrong value",
         true, nameNode.getConf()
-            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, false));
+            .getBoolean(DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
   }
 
-  void verifySPSActivated(final NameNode nameNode, String property,
+  void verifySPSEnabled(final NameNode nameNode, String property,
       boolean expected) {
     assertEquals(property + " has wrong value", expected, nameNode
         .getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
     assertEquals(property + " has wrong value", expected, nameNode.getConf()
-        .getBoolean(property, DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT));
+        .getBoolean(property, DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index bdf0159..8516ea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -96,6 +96,8 @@ public class TestPersistentStoragePolicySatisfier {
     conf.set(
         DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
         "3000");
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     final int dnNumber = storageTypes.length;
     final short replication = 3;
     MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
@@ -282,6 +284,8 @@ public class TestPersistentStoragePolicySatisfier {
     MiniDFSCluster haCluster = null;
     try {
       conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       haCluster = new MiniDFSCluster
           .Builder(conf)
           .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
@@ -376,7 +380,7 @@ public class TestPersistentStoragePolicySatisfier {
       fs.setStoragePolicy(testFile, ONE_SSD);
       fs.satisfyStoragePolicy(testFile);
 
-      cluster.getNamesystem().getBlockManager().deactivateSPS();
+      cluster.getNamesystem().getBlockManager().disableSPS();
 
       // Make sure satisfy xattr has been removed.
       DFSTestUtil.waitForXattrRemoved(testFileName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 7f96003..2536834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -96,6 +96,8 @@ public class TestStoragePolicySatisfier {
 
   private void createCluster() throws IOException {
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
         storagesPerDatanode, capacity);
     dfs = hdfsCluster.getFileSystem();
@@ -522,7 +524,7 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Stop SPS
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
       Assert.assertFalse("SPS should stopped as configured.", running);
@@ -533,7 +535,7 @@ public class TestStoragePolicySatisfier {
 
       // Restart SPS
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
 
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
@@ -548,7 +550,7 @@ public class TestStoragePolicySatisfier {
 
       // Restart SPS again
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
       Assert.assertTrue("SPS should be running as "
@@ -558,7 +560,7 @@ public class TestStoragePolicySatisfier {
       doTestWhenStoragePolicySetToCOLD();
     } catch (ReconfigurationException e) {
       throw new IOException("Exception when reconfigure "
-          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, e);
+          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, e);
     } finally {
       if (out != null) {
         out.close();
@@ -599,6 +601,8 @@ public class TestStoragePolicySatisfier {
   @Test(timeout = 120000)
   public void testMoveWithBlockPinning() throws Exception {
     config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
         .storageTypes(
             new StorageType[][] {{StorageType.DISK, StorageType.DISK},
@@ -663,6 +667,8 @@ public class TestStoragePolicySatisfier {
     try {
       int numOfDns = 5;
       config.setLong("dfs.block.size", 1024);
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       allDiskTypes =
           new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
               {StorageType.DISK, StorageType.DISK},
@@ -707,6 +713,8 @@ public class TestStoragePolicySatisfier {
             {StorageType.DISK, StorageType.SSD},
             {StorageType.DISK, StorageType.RAM_DISK}};
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
           storagesPerDatanode, capacity);
@@ -746,6 +754,8 @@ public class TestStoragePolicySatisfier {
             {StorageType.DISK, StorageType.DISK}};
 
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
@@ -782,6 +792,8 @@ public class TestStoragePolicySatisfier {
         {StorageType.DISK, StorageType.ARCHIVE}};
 
     try {
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
       dfs = hdfsCluster.getFileSystem();
@@ -825,6 +837,8 @@ public class TestStoragePolicySatisfier {
             {StorageType.DISK, StorageType.SSD},
             {StorageType.DISK, StorageType.DISK}};
     config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
     try {
       hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
@@ -915,7 +929,8 @@ public class TestStoragePolicySatisfier {
         1L);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
-
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
@@ -968,8 +983,10 @@ public class TestStoragePolicySatisfier {
   public void testSPSWhenFileLengthIsZero() throws Exception {
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
-          .build();
+      Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
       Path filePath = new Path("/zeroSizeFile");
@@ -1006,6 +1023,8 @@ public class TestStoragePolicySatisfier {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       conf.set(DFSConfigKeys
           .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
           "3000");
@@ -1054,6 +1073,8 @@ public class TestStoragePolicySatisfier {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       conf.set(DFSConfigKeys
           .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
           "3000");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
index c88d5be..b0fd3af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
@@ -65,6 +65,8 @@ public class TestStoragePolicySatisfierWithHA {
 
   private void createCluster() throws IOException {
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     startCluster(config, allDiskTypes, numOfDatanodes, storagesPerDatanode,
         capacity);
     dfs = cluster.getFileSystem(nnIndex);
@@ -131,15 +133,15 @@ public class TestStoragePolicySatisfierWithHA {
 
       try {
         cluster.getNameNode(0).reconfigurePropertyImpl(
-            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
-        Assert.fail("It's not allowed to activate or deactivate"
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
+        Assert.fail("It's not allowed to enable or disable"
             + " StoragePolicySatisfier on Standby NameNode");
       } catch (ReconfigurationException e) {
         GenericTestUtils.assertExceptionContains("Could not change property "
-            + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY
+            + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY
             + " from 'true' to 'false'", e);
         GenericTestUtils.assertExceptionContains(
-            "Activating or deactivating storage policy satisfier service on "
+            "Enabling or disabling storage policy satisfier service on "
                 + "standby NameNode is not allowed", e.getCause());
       }
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index c070113..fc5d0a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -103,6 +103,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     }
 
     final Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
         StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
@@ -215,6 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     final Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
         StripedFileTestUtil.getDefaultECPolicy().getName());
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -325,6 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
         "3000");
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
         StripedFileTestUtil.getDefaultECPolicy().getName());
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -415,6 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     final Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
         StripedFileTestUtil.getDefaultECPolicy().getName());
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5704935/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index c86eecd..e6a0356 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -47,6 +47,8 @@ public class TestStoragePolicyCommands {
   @Before
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     StorageType[][] newtypes = new StorageType[][] {
         {StorageType.ARCHIVE, StorageType.DISK}};
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
@@ -163,7 +165,7 @@ public class TestStoragePolicyCommands {
         "File/Directory does not exist: /fooz");
   }
 
-  @Test
+  @Test(timeout = 30000)
   public void testStoragePolicySatisfierCommand() throws Exception {
     final String file = "/testStoragePolicySatisfierCommand";
     DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
@@ -184,18 +186,21 @@ public class TestStoragePolicyCommands {
         fs);
   }
 
-  @Test
-  public void testIsSPSRunningCommand() throws Exception {
-    final String file = "/testIsSPSRunningCommand";
+  @Test(timeout = 30000)
+  public void testIsSatisfierRunningCommand() throws Exception {
+    final String file = "/testIsSatisfierRunningCommand";
     DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
     final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-    DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "yes");
+
     cluster.getNameNode().reconfigureProperty(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
     cluster.waitActive();
-    DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
+
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "no");
+
     // Test with unnecessary args
-    DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
         "Can't understand arguments: ");
   }
 }
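
The change repeated across the test diffs above is the same in every file: the satisfier is now opt-in via DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, and it can be flipped at runtime through the NameNode reconfiguration framework. A minimal sketch of that flow, using only classes and methods that appear in these diffs (the test class and method names here are invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;

public class TestSPSEnableToggleSketch {

  @Test(timeout = 60000)
  public void testToggleSatisfier() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // SPS only starts when the new *_ENABLED_KEY is set to true.
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
        true);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      Assert.assertTrue("SPS should be running after startup",
          cluster.getFileSystem().getClient()
              .isStoragePolicySatisfierRunning());

      // Disable at runtime through the reconfiguration framework.
      cluster.getNameNode().reconfigureProperty(
          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
      Assert.assertFalse("SPS should be stopped after reconfiguration",
          cluster.getFileSystem().getClient()
              .isStoragePolicySatisfierRunning());
    } finally {
      cluster.shutdown();
    }
  }
}
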




[12/50] [abbrv] hadoop git commit: HDFS-10794. [SPS]: Provide storage policy satisfy worker at DN for co-ordinating the block storage movement work. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-10794. [SPS]: Provide storage policy satisfy worker at DN for co-ordinating the block storage movement work. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5af54dbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5af54dbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5af54dbf

Branch: refs/heads/HDFS-10285
Commit: 5af54dbfaeacde9d4b59aa34d6974827f46bf3f5
Parents: bb6a3c8
Author: Kai Zheng <ka...@intel.com>
Authored: Wed Sep 14 17:02:11 2016 +0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:49:58 2017 +0530

----------------------------------------------------------------------
 .../datanode/StoragePolicySatisfyWorker.java    | 258 +++++++++++++++++++
 .../protocol/BlockStorageMovementCommand.java   | 101 ++++++++
 .../TestStoragePolicySatisfyWorker.java         | 160 ++++++++++++
 3 files changed, 519 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5af54dbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
new file mode 100644
index 0000000..6df4e81
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * StoragePolicySatisfyWorker handles the storage policy satisfier commands.
+ * These commands would be issued from NameNode as part of Datanode's heart beat
+ * response. BPOfferService delegates the work to this class for handling
+ * BlockStorageMovement commands.
+ */
+@InterfaceAudience.Private
+public class StoragePolicySatisfyWorker {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(StoragePolicySatisfyWorker.class);
+
+  private final DataNode datanode;
+  private final int ioFileBufferSize;
+
+  private final int moverThreads;
+  private final ExecutorService moveExecutor;
+  private final CompletionService<Void> moverExecutorCompletionService;
+  private final List<Future<Void>> moverTaskFutures;
+
+  public StoragePolicySatisfyWorker(Configuration conf, DataNode datanode) {
+    this.datanode = datanode;
+    this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
+
+    moverThreads = conf.getInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
+        DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
+    moveExecutor = initializeBlockMoverThreadPool(moverThreads);
+    moverExecutorCompletionService = new ExecutorCompletionService<>(
+        moveExecutor);
+    moverTaskFutures = new ArrayList<>();
+    // TODO: Needs to manage the number of concurrent moves per DataNode.
+  }
+
+  private ThreadPoolExecutor initializeBlockMoverThreadPool(int num) {
+    LOG.debug("Block mover to satisfy storage policy; pool threads={}", num);
+
+    ThreadPoolExecutor moverThreadPool = new ThreadPoolExecutor(1, num, 60,
+        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
+        new Daemon.DaemonFactory() {
+          private final AtomicInteger threadIndex = new AtomicInteger(0);
+
+          @Override
+          public Thread newThread(Runnable r) {
+            Thread t = super.newThread(r);
+            t.setName("BlockMoverTask-" + threadIndex.getAndIncrement());
+            return t;
+          }
+        }, new ThreadPoolExecutor.CallerRunsPolicy() {
+          @Override
+          public void rejectedExecution(Runnable runnable,
+              ThreadPoolExecutor e) {
+            LOG.info("Execution for block movement to satisfy storage policy"
+                + " got rejected, Executing in current thread");
+            // will run in the current thread.
+            super.rejectedExecution(runnable, e);
+          }
+        });
+
+    moverThreadPool.allowCoreThreadTimeOut(true);
+    return moverThreadPool;
+  }
+
+  public void processBlockMovingTasks(long trackID,
+      List<BlockMovingInfo> blockMovingInfos) {
+    Future<Void> moveCallable = null;
+    for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
+      assert blkMovingInfo
+          .getSources().length == blkMovingInfo.getTargets().length;
+
+      for (int i = 0; i < blkMovingInfo.getSources().length; i++) {
+        BlockMovingTask blockMovingTask =
+            new BlockMovingTask(blkMovingInfo.getBlock(),
+            blkMovingInfo.getSources()[i],
+            blkMovingInfo.getTargets()[i],
+            blkMovingInfo.getTargetStorageTypes()[i]);
+        moveCallable = moverExecutorCompletionService
+            .submit(blockMovingTask);
+        moverTaskFutures.add(moveCallable);
+      }
+    }
+
+    // TODO: Presently this function act as a blocking call, this has to be
+    // refined by moving the tracking logic to another tracker thread.
+    for (int i = 0; i < moverTaskFutures.size(); i++) {
+      try {
+        moveCallable = moverExecutorCompletionService.take();
+        moveCallable.get();
+      } catch (InterruptedException | ExecutionException e) {
+        // TODO: Failure retries and report back the error to NameNode.
+        LOG.error("Exception while moving block replica to target storage type",
+            e);
+      }
+    }
+  }
+
+  /**
+   * This class encapsulates the process of moving the block replica to the
+   * given target.
+   */
+  private class BlockMovingTask implements Callable<Void> {
+    private final ExtendedBlock block;
+    private final DatanodeInfo source;
+    private final DatanodeInfo target;
+    private final StorageType targetStorageType;
+
+    BlockMovingTask(ExtendedBlock block, DatanodeInfo source,
+        DatanodeInfo target, StorageType targetStorageType) {
+      this.block = block;
+      this.source = source;
+      this.target = target;
+      this.targetStorageType = targetStorageType;
+    }
+
+    @Override
+    public Void call() {
+      moveBlock();
+      return null;
+    }
+
+    private void moveBlock() {
+      LOG.info("Start moving block {}", block);
+
+      LOG.debug("Start moving block:{} from src:{} to destin:{} to satisfy "
+          + "storageType:{}", block, source, target, targetStorageType);
+      Socket sock = null;
+      DataOutputStream out = null;
+      DataInputStream in = null;
+      try {
+        DNConf dnConf = datanode.getDnConf();
+        String dnAddr = target.getXferAddr(dnConf.getConnectToDnViaHostname());
+        sock = datanode.newSocket();
+        NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr),
+            dnConf.getSocketTimeout());
+        sock.setSoTimeout(2 * dnConf.getSocketTimeout());
+        LOG.debug("Connecting to datanode {}", dnAddr);
+
+        OutputStream unbufOut = sock.getOutputStream();
+        InputStream unbufIn = sock.getInputStream();
+
+        Token<BlockTokenIdentifier> accessToken = datanode.getBlockAccessToken(
+            block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
+
+        DataEncryptionKeyFactory keyFactory = datanode
+            .getDataEncryptionKeyFactoryForBlock(block);
+        IOStreamPair saslStreams = datanode.getSaslClient().socketSend(sock,
+            unbufOut, unbufIn, keyFactory, accessToken, target);
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
+        out = new DataOutputStream(
+            new BufferedOutputStream(unbufOut, ioFileBufferSize));
+        in = new DataInputStream(
+            new BufferedInputStream(unbufIn, ioFileBufferSize));
+        sendRequest(out, block, accessToken, source, targetStorageType);
+        receiveResponse(in);
+
+        LOG.debug(
+            "Successfully moved block:{} from src:{} to destin:{} for"
+                + " satisfying storageType:{}",
+            block, source, target, targetStorageType);
+      } catch (IOException e) {
+        // TODO: handle failure retries
+        LOG.warn(
+            "Failed to move block:{} from src:{} to destin:{} to satisfy "
+                + "storageType:{}",
+            block, source, target, targetStorageType, e);
+      } finally {
+        IOUtils.closeStream(out);
+        IOUtils.closeStream(in);
+        IOUtils.closeSocket(sock);
+      }
+    }
+
+    /** Send a reportedBlock replace request to the output stream. */
+    private void sendRequest(DataOutputStream out, ExtendedBlock eb,
+        Token<BlockTokenIdentifier> accessToken, DatanodeInfo srcDn,
+        StorageType destinStorageType) throws IOException {
+      new Sender(out).replaceBlock(eb, destinStorageType, accessToken,
+          srcDn.getDatanodeUuid(), srcDn);
+    }
+
+    /** Receive a reportedBlock copy response from the input stream. */
+    private void receiveResponse(DataInputStream in) throws IOException {
+      BlockOpResponseProto response = BlockOpResponseProto
+          .parseFrom(vintPrefixed(in));
+      while (response.getStatus() == Status.IN_PROGRESS) {
+        // read intermediate responses
+        response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
+      }
+      String logInfo = "reportedBlock move is failed";
+      DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
+    }
+  }
+}
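
processBlockMovingTasks above currently blocks until every submitted move has finished (the TODO notes that tracking will later move to a separate thread). The waiting idiom it uses is the standard java.util.concurrent ExecutorCompletionService pattern; a self-contained sketch of just that idiom, with a trivial task standing in for a block move (class name invented for illustration):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<Void> completion = new ExecutorCompletionService<>(pool);
    List<Future<Void>> futures = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
      final int task = i;
      futures.add(completion.submit(() -> {
        System.out.println("moving replica " + task); // stand-in for one move
        return null;
      }));
    }

    // take() hands back futures in completion order, so looping
    // futures.size() times blocks until every submitted task has finished,
    // which is the same shape as the wait loop in processBlockMovingTasks.
    for (int i = 0; i < futures.size(); i++) {
      try {
        completion.take().get();
      } catch (ExecutionException e) {
        System.err.println("task failed: " + e.getCause());
      }
    }
    pool.shutdown();
  }
}
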

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5af54dbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
new file mode 100644
index 0000000..42ba265
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+
+/**
+ * A BlockStorageMovementCommand is an instruction to a DataNode to move the
+ * given set of blocks to specified target DataNodes to fulfill the block
+ * storage policy.
+ *
+ * Upon receiving this command, this DataNode coordinates all the block movement
+ * by passing the details to
+ * {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker}
+ * service. After the block movement this DataNode sends response back to the
+ * NameNode about the movement status.
+ */
+public class BlockStorageMovementCommand extends DatanodeCommand {
+
+  // TODO: constructor needs to be refined based on the block movement data
+  // structure.
+  BlockStorageMovementCommand(int action) {
+    super(action);
+  }
+
+  /**
+   * Stores block to storage info that can be used for block movement.
+   */
+  public static class BlockMovingInfo {
+    private ExtendedBlock blk;
+    private DatanodeInfo[] sourceNodes;
+    private StorageType[] sourceStorageTypes;
+    private DatanodeInfo[] targetNodes;
+    private StorageType[] targetStorageTypes;
+
+    public BlockMovingInfo(ExtendedBlock block,
+        DatanodeInfo[] sourceDnInfos, DatanodeInfo[] targetDnInfos,
+        StorageType[] srcStorageTypes, StorageType[] targetStorageTypes) {
+      this.blk = block;
+      this.sourceNodes = sourceDnInfos;
+      this.targetNodes = targetDnInfos;
+      this.sourceStorageTypes = srcStorageTypes;
+      this.targetStorageTypes = targetStorageTypes;
+    }
+
+    public void addBlock(ExtendedBlock block) {
+      this.blk = block;
+    }
+
+    public ExtendedBlock getBlock() {
+      return this.blk;
+    }
+
+    public DatanodeInfo[] getSources() {
+      return sourceNodes;
+    }
+
+    public DatanodeInfo[] getTargets() {
+      return targetNodes;
+    }
+
+    public StorageType[] getTargetStorageTypes() {
+      return targetStorageTypes;
+    }
+
+    public StorageType[] getSourceStorageTypes() {
+      return sourceStorageTypes;
+    }
+
+    @Override
+    public String toString() {
+      return new StringBuilder().append("BlockMovingInfo(\n  ")
+          .append("Moving block: ").append(blk).append(" From: ")
+          .append(Arrays.asList(sourceNodes)).append(" To: [")
+          .append(Arrays.asList(targetNodes)).append(")\n")
+          .append(" sourceStorageTypes: ")
+          .append(Arrays.toString(sourceStorageTypes))
+          .append(" targetStorageTypes: ")
+          .append(Arrays.toString(targetStorageTypes)).toString();
+    }
+  }
+}
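
A small sketch of how calling code might package a single replica move with the constructor added above; the LocatedBlock, target DatanodeInfo and required StorageType are assumed to come from the caller, and the helper class and method names are invented for illustration:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;

/** Sketch: build the per-replica movement descriptor the DN worker consumes. */
class BlockMovingInfoSketch {
  static List<BlockMovingInfo> singleReplicaMove(LocatedBlock lb,
      DatanodeInfo target, StorageType requiredType) {
    BlockMovingInfo info = new BlockMovingInfo(
        lb.getBlock(),                                // block to move
        new DatanodeInfo[] {lb.getLocations()[0]},    // current (source) replica
        new DatanodeInfo[] {target},                  // destination datanode
        new StorageType[] {lb.getStorageTypes()[0]},  // storage type it is on now
        new StorageType[] {requiredType});            // type required by the policy
    return Collections.singletonList(info);
  }
}
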

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5af54dbf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
new file mode 100644
index 0000000..c722306
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * This class tests the behavior of moving block replica to the given storage
+ * type to fulfill the storage policy requirement.
+ */
+public class TestStoragePolicySatisfyWorker {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestStoragePolicySatisfyWorker.class);
+
+  private static final int DEFAULT_BLOCK_SIZE = 100;
+
+  private static void initConf(Configuration conf) {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1L);
+    conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
+  }
+
+  /**
+   * Tests to verify that the block replica is moving to ARCHIVE storage type to
+   * fulfill the storage policy requirement.
+   */
+  @Test(timeout = 120000)
+  public void testMoveSingleBlockToAnotherDatanode() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(4)
+        .storageTypes(
+            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.DISK, StorageType.ARCHIVE},
+                {StorageType.DISK, StorageType.ARCHIVE}})
+        .build();
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final String file = "/testMoveSingleBlockToAnotherDatanode";
+      // write to DISK
+      final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
+      out.writeChars("testMoveSingleBlockToAnotherDatanode");
+      out.close();
+
+      // verify before movement
+      LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      StorageType[] storageTypes = lb.getStorageTypes();
+      for (StorageType storageType : storageTypes) {
+        Assert.assertTrue(StorageType.DISK == storageType);
+      }
+      // move to ARCHIVE
+      dfs.setStoragePolicy(new Path(file), "COLD");
+
+      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      DataNode src = cluster.getDataNodes().get(3);
+      DatanodeInfo targetDnInfo = DFSTestUtil
+          .getLocalDatanodeInfo(src.getXferPort());
+
+      // TODO: Need to revisit this when NN is implemented to be able to send
+      // block moving commands.
+      StoragePolicySatisfyWorker worker = new StoragePolicySatisfyWorker(conf,
+          src);
+      List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+      BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
+          lb.getBlock(), lb.getLocations()[0], targetDnInfo,
+          lb.getStorageTypes()[0], StorageType.ARCHIVE);
+      blockMovingInfos.add(blockMovingInfo);
+      INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
+      worker.processBlockMovingTasks(inode.getId(),
+          blockMovingInfos);
+      cluster.triggerHeartbeats();
+
+      // Wait till NameNode notified about the block location details
+      waitForLocatedBlockWithArchiveStorageType(dfs, file, 1, 30000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private void waitForLocatedBlockWithArchiveStorageType(
+      final DistributedFileSystem dfs, final String file,
+      int expectedArchiveCount, int timeout) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LocatedBlock lb = null;
+        try {
+          lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+
+        int archiveCount = 0;
+        for (StorageType storageType : lb.getStorageTypes()) {
+          if (StorageType.ARCHIVE == storageType) {
+            archiveCount++;
+          }
+        }
+        LOG.info("Archive replica count, expected={} and actual={}",
+            expectedArchiveCount, archiveCount);
+        return expectedArchiveCount == archiveCount;
+      }
+    }, 100, timeout);
+  }
+
+  BlockMovingInfo prepareBlockMovingInfo(ExtendedBlock block,
+      DatanodeInfo src, DatanodeInfo destin, StorageType storageType,
+      StorageType targetStorageType) {
+    return new BlockMovingInfo(block, new DatanodeInfo[] {src},
+        new DatanodeInfo[] {destin}, new StorageType[] {storageType},
+        new StorageType[] {targetStorageType});
+  }
+}




[38/50] [abbrv] hadoop git commit: HDFS-11883: [SPS]: Handle NPE in BlockStorageMovementTracker when dropSPSWork() is called. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11883: [SPS]: Handle NPE in BlockStorageMovementTracker when dropSPSWork() is called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cce0fdcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cce0fdcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cce0fdcc

Branch: refs/heads/HDFS-10285
Commit: cce0fdcc30405bc695cf9783122c5ad1c411f14a
Parents: 5909a38
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:50 2017 +0530

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockStorageMovementTracker.java      | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce0fdcc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements Runnable {
           long trackId = result.getTrackId();
           List<Future<BlockMovementResult>> blocksMoving = moverTaskFutures
               .get(trackId);
+          if (blocksMoving == null) {
+            LOG.warn("Future task doesn't exist for trackId " + trackId);
+            continue;
+          }
           blocksMoving.remove(future);
 
           List<BlockMovementResult> resultPerTrackIdList =
               addMovementResultToTrackIdList(result);
 
           // Completed all the scheduled blocks movement under this 'trackId'.
-          if (blocksMoving.isEmpty()) {
+          if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) {
             synchronized (moverTaskFutures) {
               moverTaskFutures.remove(trackId);
             }
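
The null check added above is needed because dropSPSWork() can clear the per-trackId futures map from another thread while the tracker is still consuming completed moves. A stand-alone sketch of that defensive pattern, with simplified types and invented names rather than the actual tracker code:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;

/**
 * Sketch of the guard: tolerate another thread (dropSPSWork) removing the
 * per-trackId entry while movement results are still being processed.
 */
class TrackerGuardSketch {
  private final Map<Long, List<Future<Void>>> moverTaskFutures =
      new ConcurrentHashMap<>();

  void onMovementFinished(long trackId, Future<Void> finished) {
    List<Future<Void>> blocksMoving = moverTaskFutures.get(trackId);
    if (blocksMoving == null) {
      // dropSPSWork() already removed this trackId; nothing left to update.
      return;
    }
    blocksMoving.remove(finished);
    if (blocksMoving.isEmpty()) {
      moverTaskFutures.remove(trackId);
    }
  }

  /** Called when SPS work is dropped: discard all in-flight tracking state. */
  void dropSPSWork() {
    moverTaskFutures.clear();
  }
}
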




[02/50] [abbrv] hadoop git commit: HADOOP-14729. Upgrade JUnit 3 test cases to JUnit 4. Contributed by Ajay Kumar.

Posted by ra...@apache.org.
HADOOP-14729. Upgrade JUnit 3 test cases to JUnit 4. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b7cbe38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b7cbe38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b7cbe38

Branch: refs/heads/HDFS-10285
Commit: 8b7cbe3840f1a4f4dc038ac3018a4f0dbe3bc82d
Parents: 3a4e861
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Aug 25 09:55:46 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Aug 25 09:55:46 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/conf/TestConfiguration.java   | 340 ++++++++++++-------
 .../hadoop/conf/TestConfigurationSubclass.java  |   8 +-
 .../apache/hadoop/conf/TestDeprecatedKeys.java  |   8 +-
 .../apache/hadoop/conf/TestGetInstances.java    |   8 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |   6 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |  19 +-
 .../java/org/apache/hadoop/fs/TestFilterFs.java |   8 +-
 .../hadoop/fs/TestGetFileBlockLocations.java    |  25 +-
 .../org/apache/hadoop/fs/TestGlobExpander.java  |   7 +-
 .../java/org/apache/hadoop/fs/TestTrash.java    |  24 +-
 .../apache/hadoop/fs/TestTruncatedInputBug.java |   6 +-
 .../hadoop/fs/permission/TestFsPermission.java  |  14 +-
 .../apache/hadoop/ipc/TestFairCallQueue.java    |  33 +-
 .../org/apache/hadoop/log/TestLog4Json.java     |   6 +-
 .../hadoop/net/TestScriptBasedMapping.java      |   9 +-
 .../TestScriptBasedMappingWithDependency.java   |   7 +-
 .../security/TestAuthenticationFilter.java      |   6 +-
 .../TestAuthenticationWithProxyUserFilter.java  |   7 +-
 .../security/TestWhitelistBasedResolver.java    |   9 +-
 .../apache/hadoop/security/token/TestToken.java |  12 +-
 .../hadoop/util/TestAsyncDiskService.java       |   6 +-
 .../apache/hadoop/util/TestCacheableIPList.java |  13 +-
 .../apache/hadoop/util/TestFileBasedIPList.java |  12 +-
 .../org/apache/hadoop/util/TestFindClass.java   |   2 +-
 .../apache/hadoop/util/TestGenericsUtil.java    |  12 +-
 .../org/apache/hadoop/util/TestIndexedSort.java |   7 +-
 .../hadoop/util/TestNativeLibraryChecker.java   |   8 +-
 .../mapred/TestMRWithDistributedCache.java      |  40 +--
 .../hadoop/mapred/TestFileOutputCommitter.java  |  23 +-
 .../apache/hadoop/mapred/TestIndexCache.java    |  15 +-
 .../hadoop/mapred/TestJobEndNotifier.java       |  12 +-
 .../mapreduce/TestJobMonitorAndPrint.java       |   8 +-
 .../lib/output/TestFileOutputCommitter.java     |  39 ++-
 .../lib/output/TestFileOutputFormat.java        |   7 +-
 .../mapred/jobcontrol/TestJobControl.java       |   6 +-
 .../hadoop/mapreduce/TestMapCollection.java     |   6 -
 .../lib/input/TestDelegatingInputFormat.java    |   7 +-
 .../lib/jobcontrol/TestMapReduceJobControl.java |   2 +-
 .../output/TestMRCJCFileOutputCommitter.java    |  17 +-
 .../mapred/nativetask/TestTaskContext.java      |  23 +-
 .../nativetask/buffer/TestInputBuffer.java      |   7 +-
 .../nativetask/buffer/TestOutputBuffer.java     |   7 +-
 .../nativetask/serde/TestKVSerializer.java      |  13 +-
 .../nativetask/utils/TestReadWriteBuffer.java   |   7 +-
 .../nativetask/utils/TestSizedWritable.java     |   5 +-
 .../examples/TestBaileyBorweinPlouffe.java      |  14 +-
 .../hadoop/examples/pi/math/TestLongLong.java   |  33 +-
 .../hadoop/examples/pi/math/TestModular.java    | 105 +++---
 .../hadoop/examples/pi/math/TestSummation.java  |  30 +-
 .../hadoop/contrib/utils/join/TestDataJoin.java |  41 +--
 .../org/apache/hadoop/tools/TestDistCh.java     |  23 +-
 .../typedbytes/TestTypedBytesWritable.java      |   7 +-
 .../amfilter/TestAmFilterInitializer.java       |  15 +-
 53 files changed, 718 insertions(+), 416 deletions(-)
----------------------------------------------------------------------
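
Every file touched by this commit follows the same mechanical conversion, visible in the TestConfiguration diff below: drop the junit.framework.TestCase superclass, annotate lifecycle and test methods, and statically import the asserts. Shown as before/after versions of one hypothetical test file (not a single compilation unit):

// JUnit 3 style: test discovery by naming convention, lifecycle by overriding.
import junit.framework.TestCase;

public class TestWidget extends TestCase {
  @Override
  protected void setUp() throws Exception {
    // create fixtures
  }

  public void testDoesSomething() {
    assertTrue("widget should work", true);
  }
}

// JUnit 4 style: plain class, annotations, static assert imports.
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class TestWidget {
  @Before
  public void setUp() throws Exception {
    // create fixtures
  }

  @Test
  public void testDoesSomething() {
    assertTrue("widget should work", true);
  }
}
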


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 8fe88bc..b41a807 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -44,7 +44,11 @@ import java.util.regex.Pattern;
 import static java.util.concurrent.TimeUnit.*;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import junit.framework.TestCase;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
@@ -65,11 +69,11 @@ import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
 import org.hamcrest.CoreMatchers;
-import org.junit.Assert;
 import org.mockito.Mockito;
 
-public class TestConfiguration extends TestCase {
+public class TestConfiguration {
 
+  private static final double DOUBLE_DELTA = 0.000000001f;
   private Configuration conf;
   final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new File("./test-config2-TestConfiguration.xml").getAbsolutePath();
@@ -82,7 +86,7 @@ public class TestConfiguration extends TestCase {
   private static final String CONFIG_MULTI_BYTE_SAVED = new File(
     "./test-config-multi-byte-saved-TestConfiguration.xml").getAbsolutePath();
   final static Random RAN = new Random();
-  final static String XMLHEADER = 
+  final static String XMLHEADER =
             IBM_JAVA?"<?xml version=\"1.0\" encoding=\"UTF-8\"?><configuration>":
   "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>";
 
@@ -94,18 +98,16 @@ public class TestConfiguration extends TestCase {
 
   private BufferedWriter out;
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     conf = new Configuration();
   }
-  
-  @Override
-  protected void tearDown() throws Exception {
+
+  @After
+  public void tearDown() throws Exception {
     if(out != null) {
       out.close();
     }
-    super.tearDown();
     new File(CONFIG).delete();
     new File(CONFIG2).delete();
     new File(CONFIG_FOR_ENUM).delete();
@@ -113,7 +115,7 @@ public class TestConfiguration extends TestCase {
     new File(CONFIG_MULTI_BYTE).delete();
     new File(CONFIG_MULTI_BYTE_SAVED).delete();
   }
-  
+
   private void startConfig() throws IOException{
     out.write("<?xml version=\"1.0\"?>\n");
     out.write("<configuration>\n");
@@ -164,6 +166,7 @@ public class TestConfiguration extends TestCase {
         + " [\n<!ENTITY " + entity + " SYSTEM \"" + value + "\">\n]>");
   }
 
+  @Test
   public void testInputStreamResource() throws Exception {
     StringWriter writer = new StringWriter();
     out = new BufferedWriter(writer);
@@ -182,6 +185,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("A", conf.get("prop"));
   }
 
+  @Test
   public void testFinalWarnings() throws Exception {
     // Make a configuration file with a final property
     StringWriter writer = new StringWriter();
@@ -226,6 +230,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testNoFinalWarnings() throws Exception {
     // Make a configuration file with a final property
     StringWriter writer = new StringWriter();
@@ -263,6 +268,7 @@ public class TestConfiguration extends TestCase {
 
 
 
+  @Test
   public void testFinalWarningsMultiple() throws Exception {
     // Make a configuration file with a repeated final property
     StringWriter writer = new StringWriter();
@@ -296,6 +302,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testFinalWarningsMultipleOverride() throws Exception {
     // Make a configuration file with 2 final properties with different values
     StringWriter writer = new StringWriter();
@@ -358,6 +365,7 @@ public class TestConfiguration extends TestCase {
    * round-trips multi-byte string literals through saving and loading of config
    * and asserts that the same values were read.
    */
+  @Test
   public void testMultiByteCharacters() throws IOException {
     String priorDefaultEncoding = System.getProperty("file.encoding");
     try {
@@ -388,6 +396,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testVariableSubstitution() throws IOException {
     // stubbing only environment dependent functions
     Configuration mock = Mockito.spy(conf);
@@ -419,12 +428,13 @@ public class TestConfiguration extends TestCase {
       assertEq(p.val, gotRawVal);
       assertEq(p.expectEval, gotVal);
     }
-      
+
     // check that expansion also occurs for getInt()
     assertTrue(mock.getInt("intvar", -1) == 42);
     assertTrue(mock.getInt("my.int", -1) == 42);
   }
 
+  @Test
   public void testEnvDefault() throws IOException {
     Configuration mock = Mockito.spy(conf);
     Mockito.when(mock.getenv("NULL_VALUE")).thenReturn(null);
@@ -465,6 +475,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testFinalParam() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -474,7 +485,7 @@ public class TestConfiguration extends TestCase {
     Configuration conf1 = new Configuration();
     conf1.addResource(fileResource);
     assertNull("my var is not null", conf1.get("my.var"));
-	
+
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
     declareProperty("my.var", "myval", "myval", false);
@@ -486,6 +497,7 @@ public class TestConfiguration extends TestCase {
     assertNull("my var is not final", conf2.get("my.var"));
   }
 
+  @Test
   public void testCompactFormat() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -530,8 +542,8 @@ public class TestConfiguration extends TestCase {
   }
 
   void declareProperty(String name, String val, String expectEval,
-                       boolean isFinal)
-    throws IOException {
+      boolean isFinal)
+      throws IOException {
     appendProperty(name, val, isFinal);
     Prop p = new Prop();
     p.name = name;
@@ -543,10 +555,10 @@ public class TestConfiguration extends TestCase {
   void appendProperty(String name, String val) throws IOException {
     appendProperty(name, val, false);
   }
- 
-  void appendProperty(String name, String val, boolean isFinal, 
+
+  void appendProperty(String name, String val, boolean isFinal,
       String ... sources)
-    throws IOException {
+      throws IOException {
     out.write("<property>");
     out.write("<name>");
     out.write(name);
@@ -564,19 +576,19 @@ public class TestConfiguration extends TestCase {
     }
     out.write("</property>\n");
   }
-  
+
   void appendCompactFormatProperty(String name, String val) throws IOException {
     appendCompactFormatProperty(name, val, false);
   }
 
   void appendCompactFormatProperty(String name, String val, boolean isFinal)
-    throws IOException {
+      throws IOException {
     appendCompactFormatProperty(name, val, isFinal, null);
   }
 
   void appendCompactFormatProperty(String name, String val, boolean isFinal,
       String source)
-    throws IOException {
+      throws IOException {
     out.write("<property ");
     out.write("name=\"");
     out.write(name);
@@ -595,6 +607,7 @@ public class TestConfiguration extends TestCase {
     out.write("/>\n");
   }
 
+  @Test
   public void testOverlay() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -610,25 +623,26 @@ public class TestConfiguration extends TestCase {
     appendProperty("b","d");
     appendProperty("e","e");
     endConfig();
-    
+
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
+
     //set dynamically something
     conf.set("c","d");
     conf.set("a","d");
-    
+
     Configuration clone=new Configuration(conf);
     clone.addResource(new Path(CONFIG2));
-    
-    assertEquals(clone.get("a"), "d"); 
-    assertEquals(clone.get("b"), "d"); 
-    assertEquals(clone.get("c"), "d"); 
-    assertEquals(clone.get("d"), "e"); 
-    assertEquals(clone.get("e"), "f"); 
-    
-  }
-  
+
+    assertEquals(clone.get("a"), "d");
+    assertEquals(clone.get("b"), "d");
+    assertEquals(clone.get("c"), "d");
+    assertEquals(clone.get("d"), "e");
+    assertEquals(clone.get("e"), "f");
+
+  }
+
+  @Test
   public void testCommentsInValue() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -640,6 +654,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("this  contains a comment", conf.get("my.comment"));
   }
 
+  @Test
   public void testEscapedCharactersInValue() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -651,6 +666,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("''''", conf.get("my.comment"));
   }
 
+  @Test
   public void testTrim() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -658,13 +674,13 @@ public class TestConfiguration extends TestCase {
     String[] name = new String[100];
     for(int i = 0; i < name.length; i++) {
       name[i] = "foo" + i;
-      StringBuilder prefix = new StringBuilder(); 
-      StringBuilder postfix = new StringBuilder(); 
+      StringBuilder prefix = new StringBuilder();
+      StringBuilder postfix = new StringBuilder();
       for(int j = 0; j < 3; j++) {
         prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
         postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
       }
-      
+
       appendProperty(prefix + name[i] + postfix, name[i] + ".value");
     }
     endConfig();
@@ -675,6 +691,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testGetLocalPath() throws IOException {
     Configuration conf = new Configuration();
     String[] dirs = new String[]{"a", "b", "c"};
@@ -690,7 +707,8 @@ public class TestConfiguration extends TestCase {
         localPath.contains(" "));
     }
   }
-  
+
+  @Test
   public void testGetFile() throws IOException {
     Configuration conf = new Configuration();
     String[] dirs = new String[]{"a", "b", "c"};
@@ -707,29 +725,32 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testToString() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
-    String expectedOutput = 
-      "Configuration: core-default.xml, core-site.xml, " + 
+
+    String expectedOutput =
+      "Configuration: core-default.xml, core-site.xml, " +
       fileResource.toString();
     assertEquals(expectedOutput, conf.toString());
   }
-  
+
+  @Test
   public void testWriteXml() throws IOException {
     Configuration conf = new Configuration();
-    ByteArrayOutputStream baos = new ByteArrayOutputStream(); 
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
     conf.writeXml(baos);
     String result = baos.toString();
     assertTrue("Result has proper header", result.startsWith(XMLHEADER));
-	  
+
     assertTrue("Result has proper footer", result.endsWith("</configuration>"));
   }
-  
+
+  @Test
   public void testIncludes() throws Exception {
     tearDown();
     System.out.println("XXX testIncludes");
@@ -758,8 +779,8 @@ public class TestConfiguration extends TestCase {
     // verify that the includes file contains all properties
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(conf.get("a"), "b"); 
-    assertEquals(conf.get("c"), "d"); 
+    assertEquals(conf.get("a"), "b");
+    assertEquals(conf.get("c"), "d");
     assertEquals(conf.get("e"), "f");
     assertEquals(conf.get("g"), "h");
     assertEquals(conf.get("i"), "j");
@@ -767,6 +788,7 @@ public class TestConfiguration extends TestCase {
     tearDown();
   }
 
+  @Test
   public void testCharsetInDocumentEncoding() throws Exception {
     tearDown();
     out=new BufferedWriter(new OutputStreamWriter(new FileOutputStream(CONFIG),
@@ -785,6 +807,7 @@ public class TestConfiguration extends TestCase {
     tearDown();
   }
 
+  @Test
   public void testEntityReference() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG));
@@ -803,6 +826,7 @@ public class TestConfiguration extends TestCase {
     tearDown();
   }
 
+  @Test
   public void testSystemEntityReference() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG2));
@@ -824,6 +848,7 @@ public class TestConfiguration extends TestCase {
     tearDown();
   }
 
+  @Test
   public void testIncludesWithFallback() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG2));
@@ -862,6 +887,7 @@ public class TestConfiguration extends TestCase {
     tearDown();
   }
 
+  @Test
   public void testRelativeIncludes() throws Exception {
     tearDown();
     String relConfig = new File("./tmp/test-config.xml").getAbsolutePath();
@@ -893,6 +919,7 @@ public class TestConfiguration extends TestCase {
     new File(new File(relConfig).getParent()).delete();
   }
 
+  @Test
   public void testIntegerRanges() {
     Configuration conf = new Configuration();
     conf.set("first", "-100");
@@ -923,7 +950,8 @@ public class TestConfiguration extends TestCase {
     assertEquals(true, range.isIncluded(34));
     assertEquals(true, range.isIncluded(100000000));
   }
-  
+
+  @Test
   public void testGetRangeIterator() throws Exception {
     Configuration config = new Configuration(false);
     IntegerRanges ranges = config.getRange("Test", "");
@@ -943,7 +971,7 @@ public class TestConfiguration extends TestCase {
       found.add(i);
     }
     assertEquals(expected, found);
-    
+
     ranges = config.getRange("Test", "8-12, 5- 7");
     expected = new HashSet<Integer>(Arrays.asList(5,6,7,8,9,10,11,12));
     found = new HashSet<Integer>();
@@ -953,6 +981,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(expected, found);
   }
 
+  @Test
   public void testHexValues() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -984,6 +1013,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testIntegerValues() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1014,7 +1044,8 @@ public class TestConfiguration extends TestCase {
       // pass
     }
   }
-  
+
+  @Test
   public void testHumanReadableValues() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1035,6 +1066,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testBooleanValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1058,7 +1090,8 @@ public class TestConfiguration extends TestCase {
     assertEquals(false, conf.getBoolean("test.bool7", true));
     assertEquals(false, conf.getBoolean("test.bool8", false));
   }
-  
+
+  @Test
   public void testFloatValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1070,10 +1103,10 @@ public class TestConfiguration extends TestCase {
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(3.1415f, conf.getFloat("test.float1", 0.0f));
-    assertEquals(3.1415f, conf.getFloat("test.float2", 0.0f));
-    assertEquals(-3.1415f, conf.getFloat("test.float3", 0.0f));
-    assertEquals(-3.1415f, conf.getFloat("test.float4", 0.0f));
+    assertEquals(3.1415f, conf.getFloat("test.float1", 0.0f), DOUBLE_DELTA);
+    assertEquals(3.1415f, conf.getFloat("test.float2", 0.0f), DOUBLE_DELTA);
+    assertEquals(-3.1415f, conf.getFloat("test.float3", 0.0f), DOUBLE_DELTA);
+    assertEquals(-3.1415f, conf.getFloat("test.float4", 0.0f), DOUBLE_DELTA);
     try {
       conf.getFloat("test.float5", 0.0f);
       fail("Property had invalid float value, but was read successfully.");
@@ -1081,7 +1114,8 @@ public class TestConfiguration extends TestCase {
       // pass
     }
   }
-  
+
+  @Test
   public void testDoubleValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1093,10 +1127,10 @@ public class TestConfiguration extends TestCase {
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(3.1415, conf.getDouble("test.double1", 0.0));
-    assertEquals(3.1415, conf.getDouble("test.double2", 0.0));
-    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0));
-    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0));
+    assertEquals(3.1415, conf.getDouble("test.double1", 0.0), DOUBLE_DELTA);
+    assertEquals(3.1415, conf.getDouble("test.double2", 0.0), DOUBLE_DELTA);
+    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0), DOUBLE_DELTA);
+    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0), DOUBLE_DELTA);
     try {
       conf.getDouble("test.double5", 0.0);
       fail("Property had invalid double value, but was read successfully.");
@@ -1104,7 +1138,8 @@ public class TestConfiguration extends TestCase {
       // pass
     }
   }
-  
+
+  @Test
   public void testGetClass() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1113,10 +1148,13 @@ public class TestConfiguration extends TestCase {
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals("java.lang.Integer", conf.getClass("test.class1", null).getCanonicalName());
-    assertEquals("java.lang.Integer", conf.getClass("test.class2", null).getCanonicalName());
+    assertEquals("java.lang.Integer",
+        conf.getClass("test.class1", null).getCanonicalName());
+    assertEquals("java.lang.Integer",
+        conf.getClass("test.class2", null).getCanonicalName());
   }
-  
+
+  @Test
   public void testGetClasses() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1132,14 +1170,15 @@ public class TestConfiguration extends TestCase {
     assertArrayEquals(expectedNames, extractClassNames(classes1));
     assertArrayEquals(expectedNames, extractClassNames(classes2));
   }
-  
+
+  @Test
   public void testGetStringCollection() {
     Configuration c = new Configuration();
     c.set("x", " a, b\n,\nc ");
     Collection<String> strs = c.getTrimmedStringCollection("x");
     assertEquals(3, strs.size());
     assertArrayEquals(new String[]{ "a", "b", "c" },
-                      strs.toArray(new String[0]));
+        strs.toArray(new String[0]));
 
     // Check that the result is mutable
     strs.add("z");
@@ -1150,13 +1189,14 @@ public class TestConfiguration extends TestCase {
     strs.add("z");
   }
 
+  @Test
   public void testGetTrimmedStringCollection() {
     Configuration c = new Configuration();
     c.set("x", "a, b, c");
     Collection<String> strs = c.getStringCollection("x");
     assertEquals(3, strs.size());
     assertArrayEquals(new String[]{ "a", " b", " c" },
-                      strs.toArray(new String[0]));
+        strs.toArray(new String[0]));
 
     // Check that the result is mutable
     strs.add("z");
@@ -1174,9 +1214,10 @@ public class TestConfiguration extends TestCase {
     }
     return classNames;
   }
-  
+
   enum Dingo { FOO, BAR };
   enum Yak { RAB, FOO };
+  @Test
   public void testEnum() {
     Configuration conf = new Configuration();
     conf.setEnum("test.enum", Dingo.FOO);
@@ -1193,6 +1234,7 @@ public class TestConfiguration extends TestCase {
     assertTrue(fail);
   }
 
+  @Test
   public void testEnumFromXml() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG_FOR_ENUM));
     startConfig();
@@ -1213,6 +1255,7 @@ public class TestConfiguration extends TestCase {
     assertTrue(fail);
   }
 
+  @Test
   public void testTimeDuration() {
     Configuration conf = new Configuration(false);
     conf.setTimeDuration("test.time.a", 7L, SECONDS);
@@ -1246,17 +1289,18 @@ public class TestConfiguration extends TestCase {
     assertEquals(30L, conf.getTimeDuration("test.time.d", 40, SECONDS));
 
     for (Configuration.ParsedTimeDuration ptd :
-         Configuration.ParsedTimeDuration.values()) {
+        Configuration.ParsedTimeDuration.values()) {
       conf.setTimeDuration("test.time.unit", 1, ptd.unit());
       assertEquals(1 + ptd.suffix(), conf.get("test.time.unit"));
       assertEquals(1, conf.getTimeDuration("test.time.unit", 2, ptd.unit()));
     }
   }
 
+  @Test
   public void testTimeDurationWarning() {
     // check warn for possible loss of precision
     final String warnFormat = "Possible loss of precision converting %s" +
-            " to %s for test.time.warn";
+        " to %s for test.time.warn";
     final ArrayList<String> warnchk = new ArrayList<>();
     Configuration wconf = new Configuration(false) {
       @Override
@@ -1290,6 +1334,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(2, warnchk.size());
   }
 
+  @Test
   public void testPattern() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1303,20 +1348,21 @@ public class TestConfiguration extends TestCase {
     Pattern defaultPattern = Pattern.compile("x+");
     // Return default if missing
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("xxxxx", defaultPattern).pattern());
+        conf.getPattern("xxxxx", defaultPattern).pattern());
     // Return null if empty and default is null
     assertNull(conf.getPattern("test.pattern1", null));
     // Return default for empty
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("test.pattern1", defaultPattern).pattern());
+        conf.getPattern("test.pattern1", defaultPattern).pattern());
     // Return default for malformed
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("test.pattern2", defaultPattern).pattern());
+        conf.getPattern("test.pattern2", defaultPattern).pattern());
     // Works for correct patterns
     assertEquals("a+b",
-                 conf.getPattern("test.pattern3", defaultPattern).pattern());
+        conf.getPattern("test.pattern3", defaultPattern).pattern());
   }
 
+  @Test
   public void testPropertySource() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1329,17 +1375,19 @@ public class TestConfiguration extends TestCase {
     assertEquals(1, sources.length);
     assertEquals(
         "Resource string returned for a file-loaded property" +
-        " must be a proper absolute path",
+            " must be a proper absolute path",
         fileResource,
         new Path(sources[0]));
     assertArrayEquals("Resource string returned for a set() property must be " +
-        "\"programmatically\"",
+            "\"programmatically\"",
         new String[]{"programmatically"},
         conf.getPropertySources("fs.defaultFS"));
-    assertEquals("Resource string returned for an unset property must be null",
+    assertArrayEquals("Resource string returned for an unset property must "
+            + "be null",
         null, conf.getPropertySources("fs.defaultFoo"));
   }
-  
+
+  @Test
   public void testMultiplePropertySource() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1354,24 +1402,25 @@ public class TestConfiguration extends TestCase {
     assertEquals("c", sources[2]);
     assertEquals(
         "Resource string returned for a file-loaded property" +
-        " must be a proper absolute path",
+            " must be a proper absolute path",
         fileResource,
         new Path(sources[3]));
   }
 
+  @Test
   public void testSocketAddress() {
     Configuration conf = new Configuration();
     final String defaultAddr = "host:1";
     final int defaultPort = 2;
     InetSocketAddress addr = null;
-    
+
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
-    
+
     conf.set("myAddress", "host2");
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals("host2:"+defaultPort, NetUtils.getHostPortString(addr));
-    
+
     conf.set("myAddress", "host2:3");
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals("host2:3", NetUtils.getHostPortString(addr));
@@ -1387,35 +1436,38 @@ public class TestConfiguration extends TestCase {
     } catch (IllegalArgumentException iae) {
       threwException = true;
       assertEquals("Does not contain a valid host:port authority: " +
-                   "bad:-port (configuration property 'myAddress')",
-                   iae.getMessage());
-      
+              "bad:-port (configuration property 'myAddress')",
+          iae.getMessage());
+
     } finally {
       assertTrue(threwException);
     }
   }
 
+  @Test
   public void testSetSocketAddress() {
     Configuration conf = new Configuration();
     NetUtils.addStaticResolution("host", "127.0.0.1");
     final String defaultAddr = "host:1";
-    
-    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);    
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
     conf.setSocketAddr("myAddress", addr);
     assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
   }
-  
+
+  @Test
   public void testUpdateSocketAddress() throws IOException {
     InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
     InetSocketAddress connectAddr = conf.updateConnectAddr("myAddress", addr);
     assertEquals(connectAddr.getHostName(), addr.getHostName());
-    
+
     addr = new InetSocketAddress(1);
     connectAddr = conf.updateConnectAddr("myAddress", addr);
     assertEquals(connectAddr.getHostName(),
-                 InetAddress.getLocalHost().getHostName());
+        InetAddress.getLocalHost().getHostName());
   }
 
+  @Test
   public void testReload() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1424,7 +1476,7 @@ public class TestConfiguration extends TestCase {
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
+
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
     appendProperty("test.key1", "value1");
@@ -1432,23 +1484,23 @@ public class TestConfiguration extends TestCase {
     endConfig();
     Path fileResource1 = new Path(CONFIG2);
     conf.addResource(fileResource1);
-    
+
     // add a few values via set.
     conf.set("test.key3", "value4");
     conf.set("test.key4", "value5");
-    
+
     assertEquals("final-value1", conf.get("test.key1"));
     assertEquals("value2", conf.get("test.key2"));
     assertEquals("value4", conf.get("test.key3"));
     assertEquals("value5", conf.get("test.key4"));
-    
+
     // change values in the test file...
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
     appendProperty("test.key1", "final-value1");
     appendProperty("test.key3", "final-value3", true);
     endConfig();
-    
+
     conf.reloadConfiguration();
     assertEquals("value1", conf.get("test.key1"));
     // overlayed property overrides.
@@ -1457,6 +1509,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("value5", conf.get("test.key4"));
   }
 
+  @Test
   public void testSize() {
     Configuration conf = new Configuration(false);
     conf.set("a", "A");
@@ -1464,6 +1517,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(2, conf.size());
   }
 
+  @Test
   public void testClear() {
     Configuration conf = new Configuration(false);
     conf.set("a", "A");
@@ -1476,6 +1530,7 @@ public class TestConfiguration extends TestCase {
   public static class Fake_ClassLoader extends ClassLoader {
   }
 
+  @Test
   public void testClassLoader() {
     Configuration conf = new Configuration(false);
     conf.setQuietMode(false);
@@ -1483,7 +1538,7 @@ public class TestConfiguration extends TestCase {
     Configuration other = new Configuration(conf);
     assertTrue(other.getClassLoader() instanceof Fake_ClassLoader);
   }
-  
+
   static class JsonConfiguration {
     JsonProperty[] properties;
 
@@ -1546,6 +1601,7 @@ public class TestConfiguration extends TestCase {
     return ac;
   }
 
+  @Test
   public void testGetSetTrimmedNames() throws IOException {
     Configuration conf = new Configuration(false);
     conf.set(" name", "value");
@@ -1554,6 +1610,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("value", conf.getRaw("  name  "));
   }
 
+  @Test
   public void testDumpProperty() throws IOException {
     StringWriter outWriter = new StringWriter();
     ObjectMapper mapper = new ObjectMapper();
@@ -1668,15 +1725,16 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testDumpConfiguration() throws IOException {
     StringWriter outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     String jsonStr = outWriter.toString();
     ObjectMapper mapper = new ObjectMapper();
-    JsonConfiguration jconf = 
-      mapper.readValue(jsonStr, JsonConfiguration.class);
+    JsonConfiguration jconf =
+        mapper.readValue(jsonStr, JsonConfiguration.class);
     int defaultLength = jconf.getProperties().length;
-    
+
     // add 3 keys to the existing configuration properties
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1687,7 +1745,7 @@ public class TestConfiguration extends TestCase {
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     jsonStr = outWriter.toString();
@@ -1696,7 +1754,7 @@ public class TestConfiguration extends TestCase {
     int length = jconf.getProperties().length;
     // check for consistency in the number of properties parsed in Json format.
     assertEquals(length, defaultLength+3);
-    
+
     //change few keys in another resource file
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
@@ -1706,14 +1764,14 @@ public class TestConfiguration extends TestCase {
     Path fileResource1 = new Path(CONFIG2);
     conf.addResource(fileResource1);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     jsonStr = outWriter.toString();
     mapper = new ObjectMapper();
     jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
-    
-    // put the keys and their corresponding attributes into a hashmap for their 
+
+    // put the keys and their corresponding attributes into a hashmap for their
     // efficient retrieval
     HashMap<String,JsonProperty> confDump = new HashMap<String,JsonProperty>();
     for(JsonProperty prop : jconf.getProperties()) {
@@ -1724,7 +1782,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(false, confDump.get("test.key1").getIsFinal());
     assertEquals(fileResource1.toString(),
         confDump.get("test.key1").getResource());
-    // check if final parameter test.key2 is not changed, since it is first 
+    // check if final parameter test.key2 is not changed, since it is first
     // loaded as final parameter
     assertEquals("value2", confDump.get("test.key2").getValue());
     assertEquals(true, confDump.get("test.key2").getIsFinal());
@@ -1735,7 +1793,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(false, confDump.get("test.key3").getIsFinal());
     assertEquals(fileResource.toString(),
         confDump.get("test.key3").getResource());
-    // check for resource to be "Unknown" for keys which are loaded using 'set' 
+    // check for resource to be "Unknown" for keys which are loaded using 'set'
     // and expansion of properties
     conf.set("test.key4", "value4");
     conf.set("test.key5", "value5");
@@ -1753,7 +1811,8 @@ public class TestConfiguration extends TestCase {
     assertEquals("programmatically", confDump.get("test.key4").getResource());
     outWriter.close();
   }
-  
+
+  @Test
   public void testDumpConfiguratioWithoutDefaults() throws IOException {
     // check for case when default resources are not loaded
     Configuration config = new Configuration(false);
@@ -1761,12 +1820,12 @@ public class TestConfiguration extends TestCase {
     Configuration.dumpConfiguration(config, outWriter);
     String jsonStr = outWriter.toString();
     ObjectMapper mapper = new ObjectMapper();
-    JsonConfiguration jconf = 
-      mapper.readValue(jsonStr, JsonConfiguration.class);
-    
+    JsonConfiguration jconf =
+        mapper.readValue(jsonStr, JsonConfiguration.class);
+
     //ensure that no properties are loaded.
     assertEquals(0, jconf.getProperties().length);
-    
+
     // add 2 keys
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1776,13 +1835,13 @@ public class TestConfiguration extends TestCase {
     Path fileResource = new Path(CONFIG);
     config.addResource(fileResource);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(config, outWriter);
     jsonStr = outWriter.toString();
     mapper = new ObjectMapper();
     jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
-    
+
     HashMap<String, JsonProperty>confDump = new HashMap<String, JsonProperty>();
     for (JsonProperty prop : jconf.getProperties()) {
       confDump.put(prop.getKey(), prop);
@@ -1801,6 +1860,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testDumpSensitiveProperty() throws IOException {
     final String myPassword = "ThisIsMyPassword";
     Configuration testConf = new Configuration(false);
@@ -1818,6 +1878,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testDumpSensitiveConfiguration() throws IOException {
     final String myPassword = "ThisIsMyPassword";
     Configuration testConf = new Configuration(false);
@@ -1835,6 +1896,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testGetValByRegex() {
     Configuration conf = new Configuration();
     String key1 = "t.abc.key1";
@@ -1853,10 +1915,11 @@ public class TestConfiguration extends TestCase {
     assertTrue("Picked out wrong key " + key4, !res.containsKey(key4));
   }
 
+  @Test
   public void testGetClassesShouldReturnDefaultValue() throws Exception {
     Configuration config = new Configuration();
-    Class<?>[] classes = 
-      config.getClasses("testClassName", Configuration.class);
+    Class<?>[] classes =
+        config.getClasses("testClassName", Configuration.class);
     assertEquals(
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 1, classes.length);
@@ -1864,6 +1927,7 @@ public class TestConfiguration extends TestCase {
         classes[0]);
   }
 
+  @Test
   public void testGetClassesShouldReturnEmptyArray()
       throws Exception {
     Configuration config = new Configuration();
@@ -1873,7 +1937,8 @@ public class TestConfiguration extends TestCase {
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 0, classes.length);
   }
-  
+
+  @Test
   public void testSettingValueNull() throws Exception {
     Configuration config = new Configuration();
     try {
@@ -1886,6 +1951,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testSettingKeyNull() throws Exception {
     Configuration config = new Configuration();
     try {
@@ -1897,6 +1963,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testInvalidSubstitution() {
     final Configuration configuration = new Configuration(false);
 
@@ -1913,6 +1980,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testIncompleteSubbing() {
     Configuration configuration = new Configuration(false);
     String key = "test.random.key";
@@ -1931,6 +1999,7 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  @Test
   public void testBoolean() {
     boolean value = true;
     Configuration configuration = new Configuration();
@@ -1938,6 +2007,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(value, configuration.getBoolean("value", false));
   }
 
+  @Test
   public void testBooleanIfUnset() {
     boolean value = true;
     Configuration configuration = new Configuration();
@@ -1947,20 +2017,23 @@ public class TestConfiguration extends TestCase {
     assertEquals(value, configuration.getBoolean("value", false));
   }
 
+  @Test
   public void testFloat() {
     float value = 1.0F;
     Configuration configuration = new Configuration();
     configuration.setFloat("value", value);
-    assertEquals(value, configuration.getFloat("value", 0.0F));
+    assertEquals(value, configuration.getFloat("value", 0.0F), DOUBLE_DELTA);
   }
-  
+
+  @Test
   public void testDouble() {
     double value = 1.0D;
     Configuration configuration = new Configuration();
     configuration.setDouble("value", value);
-    assertEquals(value, configuration.getDouble("value", 0.0D));
+    assertEquals(value, configuration.getDouble("value", 0.0D), DOUBLE_DELTA);
   }
 
+  @Test
   public void testInt() {
     int value = 1;
     Configuration configuration = new Configuration();
@@ -1968,6 +2041,7 @@ public class TestConfiguration extends TestCase {
     assertEquals(value, configuration.getInt("value", 0));
   }
 
+  @Test
   public void testLong() {
     long value = 1L;
     Configuration configuration = new Configuration();
@@ -1975,16 +2049,18 @@ public class TestConfiguration extends TestCase {
     assertEquals(value, configuration.getLong("value", 0L));
   }
 
+  @Test
   public void testStrings() {
     String [] strings = {"FOO","BAR"};
     Configuration configuration = new Configuration();
     configuration.setStrings("strings", strings);
     String [] returnStrings = configuration.getStrings("strings");
     for(int i=0;i<returnStrings.length;i++) {
-       assertEquals(strings[i], returnStrings[i]);
+      assertEquals(strings[i], returnStrings[i]);
     }
   }
-  
+
+  @Test
   public void testSetPattern() {
     Pattern testPattern = Pattern.compile("a+b");
     Configuration configuration = new Configuration();
@@ -1992,13 +2068,15 @@ public class TestConfiguration extends TestCase {
     assertEquals(testPattern.pattern(),
         configuration.getPattern("testPattern", Pattern.compile("")).pattern());
   }
-  
+
+  @Test
   public void testGetClassByNameOrNull() throws Exception {
-   Configuration config = new Configuration();
-   Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
-   assertNotNull(clazz);
+    Configuration config = new Configuration();
+    Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
+    assertNotNull(clazz);
   }
 
+  @Test
   public void testGetFinalParameters() throws Exception {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -2021,6 +2099,7 @@ public class TestConfiguration extends TestCase {
    * by SPARK-2546.
    * @throws Exception
    */
+  @Test
   public void testConcurrentAccesses() throws Exception {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -2061,6 +2140,7 @@ public class TestConfiguration extends TestCase {
     // it's expected behaviour.
   }
 
+  @Test
   public void testNullValueProperties() throws Exception {
     Configuration conf = new Configuration();
     conf.setAllowNullValueProperties(true);
@@ -2074,6 +2154,7 @@ public class TestConfiguration extends TestCase {
     assertEquals("value", conf.get("attr"));
   }
 
+  @Test
   public void testGetPasswordDeprecatedKeyStored() throws Exception {
     final String oldKey = "test.password.old.key";
     final String newKey = "test.password.new.key";
@@ -2093,14 +2174,15 @@ public class TestConfiguration extends TestCase {
 
     Configuration.addDeprecation(oldKey, newKey);
 
-    Assert.assertThat(conf.getPassword(newKey),
+    assertThat(conf.getPassword(newKey),
         CoreMatchers.is(password.toCharArray()));
-    Assert.assertThat(conf.getPassword(oldKey),
+    assertThat(conf.getPassword(oldKey),
         CoreMatchers.is(password.toCharArray()));
 
     FileUtil.fullyDelete(tmpDir);
   }
 
+  @Test
   public void testGetPasswordByDeprecatedKey() throws Exception {
     final String oldKey = "test.password.old.key";
     final String newKey = "test.password.new.key";
@@ -2120,9 +2202,9 @@ public class TestConfiguration extends TestCase {
 
     Configuration.addDeprecation(oldKey, newKey);
 
-    Assert.assertThat(conf.getPassword(newKey),
+    assertThat(conf.getPassword(newKey),
         CoreMatchers.is(password.toCharArray()));
-    Assert.assertThat(conf.getPassword(oldKey),
+    assertThat(conf.getPassword(oldKey),
         CoreMatchers.is(password.toCharArray()));
 
     FileUtil.fullyDelete(tmpDir);
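
For readers following the conversion in TestConfiguration.java above, here is a minimal sketch of the JUnit 4 idiom these hunks move toward; the class name and DELTA constant below are illustrative stand-ins (the patch itself relies on a DOUBLE_DELTA field defined elsewhere in the test), not code from the commit:

  import static org.junit.Assert.assertEquals;

  import org.junit.Test;

  // Illustrative only: no TestCase superclass, tests are discovered via @Test,
  // and float/double assertions carry an explicit tolerance, since the
  // two-argument assertEquals(double, double) is deprecated in JUnit 4.
  public class ExampleJUnit4Style {
    private static final double DELTA = 1e-9;  // hypothetical tolerance

    @Test
    public void testFloatWithDelta() {
      float parsed = Float.parseFloat("3.1415");
      assertEquals(3.1415f, parsed, DELTA);
    }
  }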

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
index fd2fa38..e15e699 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.conf;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import java.util.Properties;
 
@@ -25,11 +26,12 @@ import java.util.Properties;
  * Created 21-Jan-2009 13:42:36
  */
 
-public class TestConfigurationSubclass extends TestCase {
+public class TestConfigurationSubclass {
   private static final String EMPTY_CONFIGURATION_XML
           = "/org/apache/hadoop/conf/empty-configuration.xml";
 
 
+  @Test
   public void testGetProps() {
     SubConf conf = new SubConf(true);
     Properties properties = conf.getProperties();
@@ -37,6 +39,7 @@ public class TestConfigurationSubclass extends TestCase {
             properties.getProperty("hadoop.tmp.dir"));
   }
 
+  @Test
   public void testReload() throws Throwable {
     SubConf conf = new SubConf(true);
     assertFalse(conf.isReloaded());
@@ -45,6 +48,7 @@ public class TestConfigurationSubclass extends TestCase {
     Properties properties = conf.getProperties();
   }
 
+  @Test
   public void testReloadNotQuiet() throws Throwable {
     SubConf conf = new SubConf(true);
     conf.setQuietMode(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 167daa5..fd01650 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -21,15 +21,14 @@ package org.apache.hadoop.conf;
 import java.io.ByteArrayOutputStream;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
-
-public class TestDeprecatedKeys extends TestCase {
+public class TestDeprecatedKeys {
  
   //Tests a deprecated key
+  @Test
   public void testDeprecatedKeys() throws Exception {
     Configuration conf = new Configuration();
     conf.set("topology.script.file.name", "xyz");
@@ -39,6 +38,7 @@ public class TestDeprecatedKeys extends TestCase {
   }
   
   //Tests reading / writing a conf file with deprecation after setting
+  @Test
   public void testReadWriteWithDeprecatedKeys() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean("old.config.yet.to.be.deprecated", true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
index 57b7ff4..bc08e66 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
@@ -18,10 +18,11 @@
 package org.apache.hadoop.conf;
 
 import java.util.List;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
-public class TestGetInstances extends TestCase {
+public class TestGetInstances {
   
   interface SampleInterface {}
   
@@ -30,7 +31,7 @@ public class TestGetInstances extends TestCase {
   static class SampleClass implements SampleInterface {
     SampleClass() {}
   }
-	
+
   static class AnotherClass implements ChildInterface {
     AnotherClass() {}
   }
@@ -39,6 +40,7 @@ public class TestGetInstances extends TestCase {
    * Makes sure <code>Configuration.getInstances()</code> returns
    * instances of the required type.
    */
+  @Test
   public void testGetInstances() throws Exception {
     Configuration conf = new Configuration();
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
index 4009a60..f182fe5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
@@ -24,9 +24,10 @@ import java.io.OutputStreamWriter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestAvroFSInput extends TestCase {
+public class TestAvroFSInput {
 
   private static final String INPUT_DIR = "AvroFSInput";
 
@@ -34,6 +35,7 @@ public class TestAvroFSInput extends TestCase {
     return new Path(GenericTestUtils.getTempPath(INPUT_DIR));
   }
 
+  @Test
   public void testAFSInput() throws Exception {
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
index 615d0b5..a22b765 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.fs;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,16 +32,16 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 
 /** This test makes sure that "DU" does not get to run on each call to getUsed */
-public class TestDU extends TestCase {
+public class TestDU {
   final static private File DU_DIR = GenericTestUtils.getTestDir("dutmp");
 
-  @Override
+  @Before
   public void setUp() {
-      FileUtil.fullyDelete(DU_DIR);
-      assertTrue(DU_DIR.mkdirs());
+    FileUtil.fullyDelete(DU_DIR);
+    assertTrue(DU_DIR.mkdirs());
   }
 
-  @Override
+  @After
   public void tearDown() throws IOException {
       FileUtil.fullyDelete(DU_DIR);
   }
@@ -69,6 +72,7 @@ public class TestDU extends TestCase {
    * @throws IOException
    * @throws InterruptedException
    */
+  @Test
   public void testDU() throws IOException, InterruptedException {
     final int writtenSize = 32*1024;   // writing 32K
     // Allow for extra 4K on-disk slack for local file systems
@@ -107,6 +111,8 @@ public class TestDU extends TestCase {
         duSize >= writtenSize &&
         writtenSize <= (duSize + slack));
   }
+
+  @Test
   public void testDUGetUsedWillNotReturnNegative() throws IOException {
     File file = new File(DU_DIR, "data");
     assertTrue(file.createNewFile());
@@ -118,6 +124,7 @@ public class TestDU extends TestCase {
     assertTrue(String.valueOf(duSize), duSize >= 0L);
   }
 
+  @Test
   public void testDUSetInitialValue() throws IOException {
     File file = new File(DU_DIR, "dataX");
     createFile(file, 8192);
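
The fixture changes in TestDU.java follow the same recipe; a rough, self-contained sketch of the annotated lifecycle style (directory and class names here are made up for illustration, not taken from the patch):

  import static org.junit.Assert.assertTrue;

  import java.io.File;

  import org.junit.After;
  import org.junit.Before;
  import org.junit.Test;

  // Illustrative only: JUnit 4 finds fixtures by annotation, so setUp/tearDown
  // become public @Before/@After methods rather than overrides of TestCase.
  public class ExampleFixtureStyle {
    private File workDir;

    @Before
    public void setUp() {
      workDir = new File(System.getProperty("java.io.tmpdir"), "example-dutmp");
      assertTrue(workDir.isDirectory() || workDir.mkdirs());
    }

    @After
    public void tearDown() {
      File[] children = workDir.listFiles();
      if (children != null) {
        for (File f : children) {
          f.delete();  // best-effort cleanup; the patch uses FileUtil.fullyDelete
        }
      }
      workDir.delete();
    }

    @Test
    public void testWorkDirExists() {
      assertTrue(workDir.exists());
    }
  }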

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index a2f0905..5ed743f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -23,12 +23,12 @@ import java.lang.reflect.Modifier;
 import java.net.URI;
 import java.util.Iterator;
 
-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.junit.Test;
 
-public class TestFilterFs extends TestCase {
+public class TestFilterFs {
 
   private static final Log LOG = FileSystem.LOG;
 
@@ -41,7 +41,8 @@ public class TestFilterFs extends TestCase {
       return null;
     }
   }
-  
+
+  @Test
   public void testFilterFileSystem() throws Exception {
     for (Method m : AbstractFileSystem.class.getDeclaredMethods()) {
       if (Modifier.isStatic(m.getModifiers()))
@@ -69,6 +70,7 @@ public class TestFilterFs extends TestCase {
   
   // Test that FilterFs will accept an AbstractFileSystem to be filtered which
   // has an optional authority, such as ViewFs
+  @Test
   public void testFilteringWithNonrequiredAuthority() throws Exception {
     Configuration conf = new Configuration();
     ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
index 87265f4..f43480e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
@@ -22,7 +22,10 @@ import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Random;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -30,7 +33,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 /**
  * Testing the correctness of FileSystem.getFileBlockLocations.
  */
-public class TestGetFileBlockLocations extends TestCase {
+public class TestGetFileBlockLocations {
   private static String TEST_ROOT_DIR = GenericTestUtils.getTempPath(
       "testGetFileBlockLocations");
   private static final int FileLength = 4 * 1024 * 1024; // 4MB
@@ -39,11 +42,8 @@ public class TestGetFileBlockLocations extends TestCase {
   private FileSystem fs;
   private Random random;
 
-  /**
-   * @see TestCase#setUp()
-   */
-  @Override
-  protected void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     conf = new Configuration();
     Path rootPath = new Path(TEST_ROOT_DIR);
     path = new Path(rootPath, "TestGetFileBlockLocations");
@@ -91,15 +91,14 @@ public class TestGetFileBlockLocations extends TestCase {
       assertTrue(locations.length == 0);
     }
   }
-  /**
-   * @see TestCase#tearDown()
-   */
-  @Override
-  protected void tearDown() throws IOException {
+
+  @After
+  public void tearDown() throws IOException {
     fs.delete(path, true);
     fs.close();
   }
 
+  @Test
   public void testFailureNegativeParameters() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     try {
@@ -117,6 +116,7 @@ public class TestGetFileBlockLocations extends TestCase {
     }
   }
 
+  @Test
   public void testGetFileBlockLocations1() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     oneTest(0, (int) status.getLen(), status);
@@ -130,6 +130,7 @@ public class TestGetFileBlockLocations extends TestCase {
     }
   }
 
+  @Test
   public void testGetFileBlockLocations2() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     for (int i = 0; i < 1000; ++i) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
index b0466b8..9d75ba0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.fs;
 import java.io.IOException;
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestGlobExpander extends TestCase {
+public class TestGlobExpander {
 
+  @Test
   public void testExpansionIsIdentical() throws IOException {
     checkExpansionIsIdentical("");
     checkExpansionIsIdentical("/}");
@@ -35,6 +37,7 @@ public class TestGlobExpander extends TestCase {
     checkExpansionIsIdentical("p{a\\/b,c\\/d}s");
   }
 
+  @Test
   public void testExpansion() throws IOException {
     checkExpansion("{a/b}", "a/b");
     checkExpansion("/}{a/b}", "/}a/b");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 7a5b25e..12aed29 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -33,20 +33,21 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.TrashPolicyDefault.Emptier;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
  * This class tests commands from Trash.
  */
-public class TestTrash extends TestCase {
+public class TestTrash {
 
   private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
       "testTrash"));
@@ -507,19 +508,22 @@ public class TestTrash extends TestCase {
     }
   }
 
+  @Test
   public void testTrash() throws IOException {
     Configuration conf = new Configuration();
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     trashShell(FileSystem.getLocal(conf), TEST_DIR);
   }
 
+  @Test
   public void testNonDefaultFS() throws IOException {
     Configuration conf = new Configuration();
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     conf.set("fs.defaultFS", "invalid://host/bar/foo");
     trashNonDefaultFS(conf);
   }
-  
+
+  @Test
   public void testPluggableTrash() throws IOException {
     Configuration conf = new Configuration();
 
@@ -604,6 +608,7 @@ public class TestTrash extends TestCase {
     verifyTrashPermission(FileSystem.getLocal(conf), conf);
   }
 
+  @Test
   public void testTrashEmptier() throws Exception {
     Configuration conf = new Configuration();
     // Trash with 12 second deletes and 6 seconds checkpoints
@@ -665,12 +670,9 @@ public class TestTrash extends TestCase {
     emptierThread.interrupt();
     emptierThread.join();
   }
-  
-  /**
-   * @see TestCase#tearDown()
-   */
-  @Override
-  protected void tearDown() throws IOException {
+
+  @After
+  public void tearDown() throws IOException {
     File trashDir = new File(TEST_DIR.toUri().getPath());
     if (trashDir.exists() && !FileUtil.fullyDelete(trashDir)) {
       throw new IOException("Cannot remove data directory: " + trashDir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
index 41c4d47..799471b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
@@ -20,16 +20,17 @@ package org.apache.hadoop.fs;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
 
 /**
  * test for the input truncation bug when mark/reset is used.
  * HADOOP-1489
  */
-public class TestTruncatedInputBug extends TestCase {
+public class TestTruncatedInputBug {
   private static String TEST_ROOT_DIR =
       GenericTestUtils.getTestDir().getAbsolutePath();
   
@@ -49,6 +50,7 @@ public class TestTruncatedInputBug extends TestCase {
    * checksum file system currently depends on the request size
    * >= bytesPerSum to work properly.
    */
+  @Test
   public void testTruncatedInputBug() throws IOException {
     final int ioBufSize = 512;
     final int fileSize = ioBufSize*4;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index 6368a57..a22985d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -21,11 +21,14 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import static org.apache.hadoop.fs.permission.FsAction.*;
 
-public class TestFsPermission extends TestCase {
+public class TestFsPermission {
+
+  @Test
   public void testFsAction() {
     //implies
     for(FsAction a : FsAction.values()) {
@@ -53,6 +56,7 @@ public class TestFsPermission extends TestCase {
    * Ensure that when manually specifying permission modes we get
    * the expected values back out for all combinations
    */
+  @Test
   public void testConvertingPermissions() {
     for(short s = 0; s <= 01777; s++) {
       assertEquals(s, new FsPermission(s).toShort());
@@ -80,6 +84,7 @@ public class TestFsPermission extends TestCase {
     assertEquals(02000, s);
   }
 
+  @Test
   public void testSpecialBitsToString() {
     for (boolean sb : new boolean[] { false, true }) {
       for (FsAction u : FsAction.values()) {
@@ -106,6 +111,7 @@ public class TestFsPermission extends TestCase {
     }
   }
 
+  @Test
   public void testFsPermission() {
     String symbolic = "-rwxrwxrwx";
 
@@ -132,6 +138,7 @@ public class TestFsPermission extends TestCase {
     }
   }
 
+  @Test
   public void testSymbolicPermission() {
     for (int i = 0; i < SYMBOLIC.length; ++i) {
       short val = 0777;
@@ -146,6 +153,7 @@ public class TestFsPermission extends TestCase {
     }
   }
 
+  @Test
   public void testUMaskParser() throws IOException {
     Configuration conf = new Configuration();
     
@@ -163,6 +171,7 @@ public class TestFsPermission extends TestCase {
     }
   }
 
+  @Test
   public void testSymbolicUmasks() {
     Configuration conf = new Configuration();
     
@@ -176,6 +185,7 @@ public class TestFsPermission extends TestCase {
     assertEquals(0111, FsPermission.getUMask(conf).toShort());
   }
 
+  @Test
   public void testBadUmasks() {
     Configuration conf = new Configuration();
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 6b1cd29..d82a2f1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -25,7 +25,9 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.times;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -39,13 +41,12 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
 import org.mockito.Mockito;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 
-public class TestFairCallQueue extends TestCase {
+public class TestFairCallQueue {
   private FairCallQueue<Schedulable> fcq;
 
   private Schedulable mockCall(String id, int priority) {
@@ -65,6 +66,7 @@ public class TestFairCallQueue extends TestCase {
   }
 
   @SuppressWarnings("deprecation")
+  @Before
   public void setUp() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -74,6 +76,7 @@ public class TestFairCallQueue extends TestCase {
 
   // Validate that the total capacity of all subqueues equals
   // the maxQueueSize for different values of maxQueueSize
+  @Test
   public void testTotalCapacityOfSubQueues() {
     Configuration conf = new Configuration();
     FairCallQueue<Schedulable> fairCallQueue;
@@ -291,11 +294,12 @@ public class TestFairCallQueue extends TestCase {
 
   //
   // Ensure that FairCallQueue properly implements BlockingQueue
-  //
+  @Test
   public void testPollReturnsNullWhenEmpty() {
     assertNull(fcq.poll());
   }
 
+  @Test
   public void testPollReturnsTopCallWhenNotEmpty() {
     Schedulable call = mockCall("c");
     assertTrue(fcq.offer(call));
@@ -306,6 +310,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testOfferSucceeds() {
 
     for (int i = 0; i < 5; i++) {
@@ -316,6 +321,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(5, fcq.size());
   }
 
+  @Test
   public void testOfferFailsWhenFull() {
     for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
 
@@ -324,6 +330,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(5, fcq.size());
   }
 
+  @Test
   public void testOfferSucceedsWhenScheduledLowPriority() {
     // Scheduler will schedule into queue 0 x 5, then queue 1
     int mockedPriorities[] = {0, 0, 0, 0, 0, 1, 0};
@@ -334,10 +341,12 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(6, fcq.size());
   }
 
+  @Test
   public void testPeekNullWhenEmpty() {
     assertNull(fcq.peek());
   }
 
+  @Test
   public void testPeekNonDestructive() {
     Schedulable call = mockCall("c", 0);
     assertTrue(fcq.offer(call));
@@ -347,6 +356,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(1, fcq.size());
   }
 
+  @Test
   public void testPeekPointsAtHead() {
     Schedulable call = mockCall("c", 0);
     Schedulable next = mockCall("b", 0);
@@ -356,10 +366,12 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(call, fcq.peek()); // Peek points at the head
   }
 
+  @Test
   public void testPollTimeout() throws InterruptedException {
     assertNull(fcq.poll(10, TimeUnit.MILLISECONDS));
   }
 
+  @Test
   public void testPollSuccess() throws InterruptedException {
     Schedulable call = mockCall("c", 0);
     assertTrue(fcq.offer(call));
@@ -369,6 +381,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testOfferTimeout() throws InterruptedException {
     for (int i = 0; i < 5; i++) {
       assertTrue(fcq.offer(mockCall("c"), 10, TimeUnit.MILLISECONDS));
@@ -380,6 +393,7 @@ public class TestFairCallQueue extends TestCase {
   }
 
   @SuppressWarnings("deprecation")
+  @Test
   public void testDrainTo() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -397,6 +411,7 @@ public class TestFairCallQueue extends TestCase {
   }
 
   @SuppressWarnings("deprecation")
+  @Test
   public void testDrainToWithLimit() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -413,16 +428,19 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(2, fcq2.size());
   }
 
+  @Test
   public void testInitialRemainingCapacity() {
     assertEquals(10, fcq.remainingCapacity());
   }
 
+  @Test
   public void testFirstQueueFullRemainingCapacity() {
     while (fcq.offer(mockCall("c"))) ; // Queue 0 will fill up first, then queue 1
 
     assertEquals(5, fcq.remainingCapacity());
   }
 
+  @Test
   public void testAllQueuesFullRemainingCapacity() {
     int[] mockedPriorities = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0};
     int i = 0;
@@ -432,6 +450,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(10, fcq.size());
   }
 
+  @Test
   public void testQueuesPartialFilledRemainingCapacity() {
     int[] mockedPriorities = {0, 1, 0, 1, 0};
     for (int i = 0; i < 5; i++) { fcq.offer(mockCall("c", mockedPriorities[i])); }
@@ -555,12 +574,14 @@ public class TestFairCallQueue extends TestCase {
   }
 
   // Make sure put will overflow into lower queues when the top is full
+  @Test
   public void testPutOverflows() throws InterruptedException {
     // We can fit more than 5, even though the scheduler suggests the top queue
     assertCanPut(fcq, 8, 8);
     assertEquals(8, fcq.size());
   }
 
+  @Test
   public void testPutBlocksWhenAllFull() throws InterruptedException {
     assertCanPut(fcq, 10, 10); // Fill up
     assertEquals(10, fcq.size());
@@ -569,10 +590,12 @@ public class TestFairCallQueue extends TestCase {
     assertCanPut(fcq, 0, 1); // Will block
   }
 
+  @Test
   public void testTakeBlocksWhenEmpty() throws InterruptedException {
     assertCanTake(fcq, 0, 1);
   }
 
+  @Test
   public void testTakeRemovesCall() throws InterruptedException {
     Schedulable call = mockCall("c");
     fcq.offer(call);
@@ -581,6 +604,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testTakeTriesNextQueue() throws InterruptedException {
 
     // A mux which only draws from q 0
@@ -597,6 +621,7 @@ public class TestFairCallQueue extends TestCase {
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testFairCallQueueMXBean() throws Exception {
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     ObjectName mxbeanName = new ObjectName(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
index 9fea50e..d41a587 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.log;
 
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.node.ContainerNode;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;
@@ -33,7 +34,6 @@ import org.apache.log4j.spi.HierarchyEventListener;
 import org.apache.log4j.spi.LoggerFactory;
 import org.apache.log4j.spi.LoggerRepository;
 import org.apache.log4j.spi.ThrowableInformation;
-import org.junit.Test;
 
 import java.io.IOException;
 import java.io.StringWriter;
@@ -42,7 +42,7 @@ import java.net.NoRouteToHostException;
 import java.util.Enumeration;
 import java.util.Vector;
 
-public class TestLog4Json extends TestCase {
+public class TestLog4Json {
 
   private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
index e201787..0d0d5b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-
-import junit.framework.TestCase;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestScriptBasedMapping extends TestCase {
-
+import org.apache.hadoop.conf.Configuration;
 
+public class TestScriptBasedMapping {
   
   public TestScriptBasedMapping() {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
index 77da45b..8638591 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
@@ -19,13 +19,12 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.List;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 
-import junit.framework.TestCase;
-import org.junit.Test;
-
-public class TestScriptBasedMappingWithDependency extends TestCase {
+public class TestScriptBasedMappingWithDependency {
 
   
   public TestScriptBasedMappingWithDependency() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
index 64cd9b7..9fae536 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
@@ -17,19 +17,21 @@
 package org.apache.hadoop.security;
 
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import java.util.Map;
 
-public class TestAuthenticationFilter extends TestCase {
+public class TestAuthenticationFilter {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
index 504f5a1..dac6a55 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.security;
 
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
@@ -30,9 +30,10 @@ import java.util.Map;
  * This class is tested for {@link AuthenticationWithProxyUserFilter}
  * to verify configurations of this filter.
  */
-public class TestAuthenticationWithProxyUserFilter extends TestCase {
+public class TestAuthenticationWithProxyUserFilter {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
index 684ef3b..03fc4cb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
@@ -21,17 +21,18 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.Map;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.WhitelistBasedResolver;
 import org.apache.hadoop.util.TestFileBasedIPList;
 
-public class TestWhitelistBasedResolver extends TestCase {
+public class TestWhitelistBasedResolver {
 
   public static final Map<String, String> SASL_PRIVACY_PROPS =
     WhitelistBasedResolver.getSaslProperties(new Configuration());
 
+  @Test
   public void testFixedVariableAndLocalWhiteList() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
@@ -79,6 +80,7 @@ public class TestWhitelistBasedResolver extends TestCase {
    * Check  for inclusion in whitelist
    * Check for exclusion from whitelist
    */
+  @Test
   public void testFixedAndLocalWhiteList() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
@@ -128,6 +130,7 @@ public class TestWhitelistBasedResolver extends TestCase {
    * Add a bunch of subnets and IPSs to the whitelist
    * Check  for inclusion in whitelist with a null value
    */
+  @Test
   public void testNullIPAddress() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
index 1741eb7..f6e5133 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
@@ -25,11 +25,12 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
 /** Unit tests for Token */
-public class TestToken extends TestCase {
+public class TestToken {
 
   static boolean isEqual(Object a, Object b) {
     return a == null ? b == null : a.equals(b);
@@ -45,6 +46,7 @@ public class TestToken extends TestCase {
   /**
    * Test token serialization
    */
+  @Test
   public void testTokenSerialization() throws IOException {
     // Get a token
     Token<TokenIdentifier> sourceToken = new Token<TokenIdentifier>();
@@ -76,7 +78,8 @@ public class TestToken extends TestCase {
     }
   }
 
-  public static void testEncodeWritable() throws Exception {
+  @Test
+  public void testEncodeWritable() throws Exception {
     String[] values = new String[]{"", "a", "bb", "ccc", "dddd", "eeeee",
         "ffffff", "ggggggg", "hhhhhhhh", "iiiiiiiii",
         "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM" +
@@ -96,7 +99,8 @@ public class TestToken extends TestCase {
       checkUrlSafe(encode);
     }
   }
-  
+
+  @Test
   public void testDecodeIdentifier() throws IOException {
     TestDelegationTokenSecretManager secretManager =
       new TestDelegationTokenSecretManager(0, 0, 0, 0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7cbe38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
index 58935f2..f36c586 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.util;
 
-import junit.framework.TestCase;
-
-import org.apache.hadoop.util.AsyncDiskService;
 import org.junit.Test;
+import static org.junit.Assert.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * A test for AsyncDiskService.
  */
-public class TestAsyncDiskService extends TestCase {
+public class TestAsyncDiskService {
   
   public static final Logger LOG =
       LoggerFactory.getLogger(TestAsyncDiskService.class);
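
Every file in this patch follows the same mechanical JUnit 3 to JUnit 4 conversion: drop "extends TestCase", statically import org.junit.Assert, mark the fixture method with @Before, and mark each test method with @Test (former static JUnit-3-style test helpers become instance methods, as in the TestToken change above). A minimal sketch of the resulting shape, using a made-up test class rather than anything from the patch:

    import org.junit.Before;
    import org.junit.Test;
    import static org.junit.Assert.*;

    // Hypothetical test used only to illustrate the conversion pattern.
    public class TestExample {                 // before: extends junit.framework.TestCase

      private StringBuilder fixture;

      @Before                                  // before: setUp() ran implicitly under JUnit 3
      public void setUp() {
        fixture = new StringBuilder("hadoop");
      }

      @Test                                    // before: discovered by the "test" name prefix
      public void testFixtureIsInitialized() {
        assertEquals("hadoop", fixture.toString());
      }
    }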




[23/50] [abbrv] hadoop git commit: HDFS-11032: [SPS]: Handling of block movement failure at the coordinator datanode. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-11032: [SPS]: Handling of block movement failure at the coordinator datanode. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f18deaee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f18deaee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f18deaee

Branch: refs/heads/HDFS-10285
Commit: f18deaeeb69badb972f1155004e83e2de60f8ef5
Parents: 98ced53
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Thu Dec 22 17:07:49 2016 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:31 2017 +0530

----------------------------------------------------------------------
 .../datanode/StoragePolicySatisfyWorker.java    |   9 +-
 .../namenode/TestStoragePolicySatisfier.java    | 168 +++++++++++++++----
 2 files changed, 143 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f18deaee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index a69a38b..19f3fe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockPinningException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -251,6 +252,12 @@ public class StoragePolicySatisfyWorker {
                 + " satisfying storageType:{}",
             block, source, target, targetStorageType);
         return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
+      } catch (BlockPinningException e) {
+        // Pinned block won't be able to move to a different node. So, its not
+        // required to do retries, just marked as SUCCESS.
+        LOG.debug("Pinned block can't be moved, so skipping block:{}", block,
+            e);
+        return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
       } catch (IOException e) {
         // TODO: handle failure retries
         LOG.warn(
@@ -282,7 +289,7 @@ public class StoragePolicySatisfyWorker {
         response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
       }
       String logInfo = "reportedBlock move is failed";
-      DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
+      DataTransferProtoUtil.checkBlockOpStatus(response, logInfo, true);
     }
   }
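
The interesting part of the worker change is the new catch clause: a pinned block can never be relocated to another node, so the worker deliberately reports the move as successful instead of leaving it as a retriable failure. A self-contained sketch of that decision follows; the enum, interface and exception here are simplified stand-ins rather than the real Hadoop types:

    import java.io.IOException;

    public class BlockMoveSketch {

      enum MoveStatus { DN_BLK_STORAGE_MOVEMENT_SUCCESS, DN_BLK_STORAGE_MOVEMENT_FAILURE }

      static class BlockPinningException extends IOException {
        BlockPinningException(String msg) { super(msg); }
      }

      interface BlockMover { void moveBlock() throws IOException; }

      static MoveStatus move(BlockMover mover) {
        try {
          mover.moveBlock();
          return MoveStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
        } catch (BlockPinningException e) {
          // A pinned block cannot live anywhere else, so retrying is pointless;
          // report SUCCESS so the coordinator does not reschedule the movement.
          return MoveStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
        } catch (IOException e) {
          // Ordinary I/O failures remain candidates for retry handling.
          return MoveStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE;
        }
      }

      public static void main(String[] args) {
        System.out.println(move(() -> { throw new BlockPinningException("block is pinned"); }));
        System.out.println(move(() -> { throw new IOException("socket timeout"); }));
      }
    }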
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f18deaee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fe23f3e..179b66b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
@@ -36,12 +37,15 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,10 +70,16 @@ public class TestStoragePolicySatisfier {
   final private long capacity = 2 * 256 * 1024 * 1024;
   final private String file = "/testMoveWhenStoragePolicyNotSatisfying";
   private DistributedFileSystem dfs = null;
+  private static final int DEFAULT_BLOCK_SIZE = 1024;
 
-  @Before
-  public void setUp() throws IOException {
-    config.setLong("dfs.block.size", 1024);
+  private void shutdownCluster() {
+    if (hdfsCluster != null) {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void createCluster() throws IOException {
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
         storagesPerDatanode, capacity);
     dfs = hdfsCluster.getFileSystem();
@@ -81,6 +91,7 @@ public class TestStoragePolicySatisfier {
       throws Exception {
 
     try {
+      createCluster();
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -99,7 +110,7 @@ public class TestStoragePolicySatisfier {
       // Wait till namenode notified about the block location details
       waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -107,6 +118,7 @@ public class TestStoragePolicySatisfier {
   public void testWhenStoragePolicySetToALLSSD()
       throws Exception {
     try {
+      createCluster();
       // Change policy to ALL_SSD
       dfs.setStoragePolicy(new Path(file), "ALL_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -127,7 +139,7 @@ public class TestStoragePolicySatisfier {
       // areas
       waitExpectedStorageType(file, StorageType.SSD, 3, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -135,6 +147,7 @@ public class TestStoragePolicySatisfier {
   public void testWhenStoragePolicySetToONESSD()
       throws Exception {
     try {
+      createCluster();
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -154,7 +167,7 @@ public class TestStoragePolicySatisfier {
       waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
       waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -165,6 +178,7 @@ public class TestStoragePolicySatisfier {
   @Test(timeout = 300000)
   public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
     try {
+      createCluster();
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -186,7 +200,7 @@ public class TestStoragePolicySatisfier {
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -196,18 +210,18 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testMultipleFilesForSatisfyStoragePolicy() throws Exception {
-    List<String> files = new ArrayList<>();
-    files.add(file);
-
-    // Creates 4 more files. Send all of them for satisfying the storage policy
-    // together.
-    for (int i = 0; i < 4; i++) {
-      String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
-      files.add(file1);
-      writeContent(file1);
-    }
-
     try {
+      createCluster();
+      List<String> files = new ArrayList<>();
+      files.add(file);
+
+      // Creates 4 more files. Send all of them for satisfying the storage
+      // policy together.
+      for (int i = 0; i < 4; i++) {
+        String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
+        files.add(file1);
+        writeContent(file1);
+      }
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       List<Long> blockCollectionIds = new ArrayList<>();
       // Change policy to ONE_SSD
@@ -237,7 +251,7 @@ public class TestStoragePolicySatisfier {
 
       waitForBlocksMovementResult(blockCollectionIds.size(), 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -247,10 +261,10 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testSatisfyFileWithHdfsAdmin() throws Exception {
-    HdfsAdmin hdfsAdmin =
-        new HdfsAdmin(FileSystem.getDefaultUri(config), config);
     try {
-
+      createCluster();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), "COLD");
 
@@ -267,7 +281,7 @@ public class TestStoragePolicySatisfier {
       // Wait till namenode notified about the block location details
       waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -277,11 +291,10 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testSatisfyDirWithHdfsAdmin() throws Exception {
-    HdfsAdmin hdfsAdmin =
-        new HdfsAdmin(FileSystem.getDefaultUri(config), config);
-
     try {
-
+      createCluster();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
       final String subDir = "/subDir";
       final String subFile1 = subDir + "/subFile1";
       final String subDir2 = subDir + "/subDir2";
@@ -310,7 +323,7 @@ public class TestStoragePolicySatisfier {
       // take no effect for the sub-dir's file in the directory.
       waitExpectedStorageType(subFile2, StorageType.DEFAULT, 3, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -321,6 +334,7 @@ public class TestStoragePolicySatisfier {
   @Test(timeout = 300000)
   public void testSatisfyWithExceptions() throws Exception {
     try {
+      createCluster();
       final String nonExistingFile = "/noneExistingFile";
       hdfsCluster.getConfiguration(0).
           setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
@@ -354,7 +368,7 @@ public class TestStoragePolicySatisfier {
 
       }
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -376,6 +390,7 @@ public class TestStoragePolicySatisfier {
   public void testWhenOnlyFewTargetDatanodeAreAvailableToSatisfyStoragePolicy()
       throws Exception {
     try {
+      createCluster();
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -397,7 +412,7 @@ public class TestStoragePolicySatisfier {
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -418,6 +433,7 @@ public class TestStoragePolicySatisfier {
   public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
       throws Exception {
     try {
+      createCluster();
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
@@ -440,7 +456,7 @@ public class TestStoragePolicySatisfier {
       // re-attempted.
       waitForAttemptedItems(1, 30000);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -452,6 +468,7 @@ public class TestStoragePolicySatisfier {
   public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
       throws IOException {
     try {
+      createCluster();
       // Simulate Mover by creating MOVER_ID file
       DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
           HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
@@ -461,8 +478,93 @@ public class TestStoragePolicySatisfier {
       Assert.assertFalse("SPS should not start "
           + "when a Mover instance is running", running);
     } finally {
-      hdfsCluster.shutdown();
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test to verify that satisfy worker can't move blocks. If the given block is
+   * pinned it shouldn't be considered for retries.
+   */
+  @Test(timeout = 120000)
+  public void testMoveWithBlockPinning() throws Exception {
+    config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
+    hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
+        .storageTypes(
+            new StorageType[][] {{StorageType.DISK, StorageType.DISK},
+                {StorageType.DISK, StorageType.DISK},
+                {StorageType.DISK, StorageType.DISK}})
+        .build();
+
+    hdfsCluster.waitActive();
+    dfs = hdfsCluster.getFileSystem();
+
+    // create a file with replication factor 3 and mark 2 pinned block
+    // locations.
+    final String file1 = createFileAndSimulateFavoredNodes(2);
+
+    // Change policy to COLD
+    dfs.setStoragePolicy(new Path(file1), "COLD");
+    FSNamesystem namesystem = hdfsCluster.getNamesystem();
+    INode inode = namesystem.getFSDirectory().getINode(file1);
+
+    StorageType[][] newtypes =
+        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+    // Adding DISK based datanodes
+    startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+        storagesPerDatanode, capacity, hdfsCluster);
+
+    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+    hdfsCluster.triggerHeartbeats();
+
+    // No block movement will be scheduled as there is no target node available
+    // with the required storage type.
+    waitForAttemptedItems(1, 30000);
+    waitForBlocksMovementResult(1, 30000);
+    waitExpectedStorageType(file1, StorageType.ARCHIVE, 1, 30000);
+    waitExpectedStorageType(file1, StorageType.DISK, 2, 30000);
+  }
+
+  private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
+      throws IOException {
+    ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
+    final String file1 = "/testMoveWithBlockPinning";
+    // replication factor 3
+    InetSocketAddress[] favoredNodes = new InetSocketAddress[favoredNodesCount];
+    for (int i = 0; i < favoredNodesCount; i++) {
+      favoredNodes[i] = dns.get(i).getXferAddress();
+    }
+    DFSTestUtil.createFile(dfs, new Path(file1), false, 1024, 100,
+        DEFAULT_BLOCK_SIZE, (short) 3, 0, false, favoredNodes);
+
+    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
+    Assert.assertEquals("Wrong block count", 1,
+        locatedBlocks.locatedBlockCount());
+
+    // verify storage type before movement
+    LocatedBlock lb = locatedBlocks.get(0);
+    StorageType[] storageTypes = lb.getStorageTypes();
+    for (StorageType storageType : storageTypes) {
+      Assert.assertTrue(StorageType.DISK == storageType);
+    }
+
+    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
+    DatanodeInfo[] locations = lb.getLocations();
+    Assert.assertEquals(3, locations.length);
+    Assert.assertTrue(favoredNodesCount < locations.length);
+    for(DatanodeInfo dnInfo: locations){
+      LOG.info("Simulate block pinning in datanode {}",
+          locations[favoredNodesCount]);
+      DataNode dn = hdfsCluster.getDataNode(dnInfo.getIpcPort());
+      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+      favoredNodesCount--;
+      if (favoredNodesCount <= 0) {
+        break;// marked favoredNodesCount number of pinned block location
+      }
     }
+    return file1;
   }
 
   private void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
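
Besides the new block-pinning test, most of this test-file churn moves cluster creation from @Before into each try block and funnels teardown through a null-safe shutdownCluster(). A rough sketch of that lifecycle pattern; MiniCluster here is a placeholder, not the real MiniDFSCluster API:

    public class ClusterLifecycleSketch {

      // Placeholder for a heavyweight test resource such as a mini cluster.
      static class MiniCluster {
        void shutdown() { System.out.println("cluster stopped"); }
      }

      private MiniCluster cluster;

      private void shutdownCluster() {
        if (cluster != null) {        // null-safe: setup may have failed before creation
          cluster.shutdown();
        }
      }

      public void runTest() {
        try {
          cluster = new MiniCluster();   // created inside try, not in a shared @Before
          // ... exercise the cluster here ...
        } finally {
          shutdownCluster();             // always runs, even if creation itself threw
        }
      }

      public static void main(String[] args) {
        new ClusterLifecycleSketch().runTest();
      }
    }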




[03/50] [abbrv] hadoop git commit: HDFS-12319. DirectoryScanner will throw IllegalStateException when Multiple BP's are present. Contributed by Brahma Reddy Battula.

Posted by ra...@apache.org.
HDFS-12319. DirectoryScanner will throw IllegalStateException when Multiple BP's are present. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b2c442d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b2c442d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b2c442d

Branch: refs/heads/HDFS-10285
Commit: 4b2c442d4e34f4708fa2ca442208427ca10798c1
Parents: 8b7cbe3
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Aug 25 10:41:52 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Aug 25 10:41:52 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DirectoryScanner.java  |  5 +-
 .../server/datanode/TestDirectoryScanner.java   | 60 ++++++++++++++++++--
 2 files changed, 58 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2c442d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 18188dd..966bcb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -606,12 +606,11 @@ public class DirectoryScanner implements Runnable {
     public ScanInfoPerBlockPool call() throws IOException {
       String[] bpList = volume.getBlockPoolList();
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
+      perfTimer.start();
+      throttleTimer.start();
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
 
-        perfTimer.start();
-        throttleTimer.start();
-
         try {
           result.put(bpid, volume.compileReport(bpid, report, this));
         } catch (InterruptedException ex) {
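
The two moved lines are the whole fix: the timers are now started once per volume scan instead of once per block pool, and starting an already-running timer is what raised the IllegalStateException from the JIRA title when a datanode served more than one block pool. A minimal reproduction of the idea, with a stand-in StopWatch rather than org.apache.hadoop.util.StopWatch:

    import java.util.Arrays;
    import java.util.List;

    public class ScannerTimerSketch {

      // Stand-in with the relevant behaviour: starting twice is an error.
      static class StopWatch {
        private boolean running;
        StopWatch start() {
          if (running) {
            throw new IllegalStateException("StopWatch is already running");
          }
          running = true;
          return this;
        }
      }

      public static void main(String[] args) {
        List<String> blockPools = Arrays.asList("BP-1", "BP-2");
        StopWatch perfTimer = new StopWatch();
        StopWatch throttleTimer = new StopWatch();

        perfTimer.start();            // fixed code: start once, before the loop
        throttleTimer.start();
        for (String bpid : blockPools) {
          // perfTimer.start();       // old code: the second block pool triggered the exception
          System.out.println("compiling report for " + bpid);
        }
      }
    }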

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2c442d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 956406d..c95c71b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -316,15 +317,22 @@ public class TestDirectoryScanner {
          missingMemoryBlocks, mismatchBlocks, 0);
   }
 
-    private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) throws IOException {
     scanner.reconcile();
-    
+    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  }
+
+  private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) {
     assertTrue(scanner.diffs.containsKey(bpid));
     LinkedList<FsVolumeSpi.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
-    
+
     assertEquals(diffsize, diff.size());
     assertEquals(totalBlocks, stats.totalBlocks);
     assertEquals(missingMetaFile, stats.missingMetaFile);
@@ -1035,4 +1043,48 @@ public class TestDirectoryScanner {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDirectoryScannerInFederatedCluster() throws Exception {
+    //Create Federated cluster with two nameservices and one DN
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(1);
+      cluster.transitionToActive(3);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+      //Create one block in first nameservice
+      FileSystem fs = cluster.getFileSystem(1);
+      int bp1Files = 1;
+      writeFile(fs, bp1Files);
+      //Create two blocks in second nameservice
+      FileSystem fs2 = cluster.getFileSystem(3);
+      int bp2Files = 2;
+      writeFile(fs2, bp2Files);
+      //Call the Directory scanner
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
+      scanner.setRetainDiffs(true);
+      scanner.reconcile();
+      //Check blocks in corresponding BP
+      bpid = cluster.getNamesystem(1).getBlockPoolId();
+      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+      bpid = cluster.getNamesystem(3).getBlockPoolId();
+      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+    } finally {
+      if (scanner != null) {
+        scanner.shutdown();
+        scanner = null;
+      }
+    }
+  }
+
+  private void writeFile(FileSystem fs, int numFiles) throws IOException {
+    final String fileName = "/" + GenericTestUtils.getMethodName();
+    final Path filePath = new Path(fileName);
+    for (int i = 0; i < numFiles; i++) {
+      DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
+    }
+  }
 }




[04/50] [abbrv] hadoop git commit: HDFS-12299. Race Between update pipeline and DN Re-Registration

Posted by ra...@apache.org.
HDFS-12299. Race Between update pipeline and DN Re-Registration


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8455d707
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8455d707
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8455d707

Branch: refs/heads/HDFS-10285
Commit: 8455d70756b584ddf27fc626a147f4eb2e1dc94e
Parents: 4b2c442
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Aug 25 14:49:29 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Aug 25 14:49:29 2017 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  3 +-
 .../TestClientProtocolForPipelineRecovery.java  | 47 ++++++++++++++++++++
 2 files changed, 49 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8455d707/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index f5ce0ff..838da7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1620,7 +1620,8 @@ class DataStreamer extends Daemon {
   }
 
   /** update pipeline at the namenode */
-  private void updatePipeline(long newGS) throws IOException {
+  @VisibleForTesting
+  public void updatePipeline(long newGS) throws IOException {
     final ExtendedBlock oldBlock = block.getCurrentBlock();
     // the new GS has been propagated to all DN, it should be ok to update the
     // local block state
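
The production change is small: updatePipeline(long) is widened from private to public and tagged @VisibleForTesting so the new regression test can drive the pipeline update directly after re-registering a datanode. A tiny illustration of that annotation pattern; it assumes Guava's annotation on the classpath, and PipelineSketch is made up:

    import com.google.common.annotations.VisibleForTesting;

    public class PipelineSketch {

      private long generationStamp;

      @VisibleForTesting           // records that the wider visibility exists only for tests
      public void updatePipeline(long newGS) {   // was private before the change
        this.generationStamp = newGS;
      }

      long getGenerationStamp() {
        return generationStamp;
      }
    }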

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8455d707/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 0212c4e..3f8c7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -706,4 +709,48 @@ public class TestClientProtocolForPipelineRecovery {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUpdatePipeLineAfterDNReg()throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+
+      Path file = new Path("/testUpdatePipeLineAfterDNReg");
+      FSDataOutputStream out = fileSys.create(file);
+      out.write(1);
+      out.hflush();
+      //Get the First DN and disable the heartbeats and then put in Deadstate
+      DataNode dn1 = cluster.getDataNodes().get(0);
+      dn1.setHeartbeatsDisabledForTests(true);
+      DatanodeDescriptor dn1Desc = cluster.getNamesystem(0).getBlockManager()
+          .getDatanodeManager().getDatanode(dn1.getDatanodeId());
+      cluster.setDataNodeDead(dn1Desc);
+      //Re-register the DeadNode
+      DatanodeProtocolClientSideTranslatorPB dnp = new DatanodeProtocolClientSideTranslatorPB(
+          cluster.getNameNode().getNameNodeAddress(), conf);
+      dnp.registerDatanode(
+          dn1.getDNRegistrationForBP(cluster.getNamesystem().getBlockPoolId()));
+      DFSOutputStream dfsO = (DFSOutputStream) out.getWrappedStream();
+      String clientName = ((DistributedFileSystem) fileSys).getClient()
+          .getClientName();
+      NamenodeProtocols namenode = cluster.getNameNodeRpc();
+      //Update the genstamp and call updatepipeline
+      LocatedBlock newBlock = namenode
+          .updateBlockForPipeline(dfsO.getBlock(), clientName);
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+      newBlock = namenode.updateBlockForPipeline(dfsO.getBlock(), clientName);
+      //Should not throw any error Pipeline should be success
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }




[32/50] [abbrv] hadoop git commit: HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is running. Contributed by Wei Zhou

Posted by ra...@apache.org.
HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is running. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34ba6df6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34ba6df6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34ba6df6

Branch: refs/heads/HDFS-10285
Commit: 34ba6df6131fa3821924c0fd81a467a582da14fb
Parents: 69d8ccc
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Fri Feb 17 20:49:38 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:20 2017 +0530

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSNamesystem.java      |  17 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   7 ++
 .../server/namenode/StoragePolicySatisfier.java |  19 +---
 .../namenode/TestStoragePolicySatisfier.java    | 108 +++++++++++++++----
 4 files changed, 113 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3cb47b9..b3663a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3437,7 +3437,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   BlockInfo getStoredBlock(Block block) {
     return blockManager.getStoredBlock(block);
   }
-  
+
+  @Override
+  public boolean isFileOpenedForWrite(String path) {
+    readLock();
+    try {
+      INode inode = dir.getINode(path, FSDirectory.DirOp.READ);
+      INodeFile iNodeFile = INodeFile.valueOf(inode, path);
+      LeaseManager.Lease lease = leaseManager.getLease(iNodeFile);
+      return lease != null;
+    } catch (IOException e) {
+      return false;
+    } finally {
+      readUnlock();
+    }
+  }
+
   @Override
   public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index e07376b..a2b07ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -45,4 +45,11 @@ public interface Namesystem extends RwLock, SafeMode {
    *         middle of the starting active services.
    */
   boolean inTransitionToActive();
+
+  /**
+   * Check if file is been opened for write purpose.
+   * @param filePath
+   * @return true if valid write lease exists, otherwise return false.
+   */
+  boolean isFileOpenedForWrite(String filePath);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index dc58294..29c8a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -151,19 +150,8 @@ public class StoragePolicySatisfier implements Runnable {
 
   // Return true if a Mover instance is running
   private boolean checkIfMoverRunning() {
-    boolean ret = false;
-    try {
-      String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
-      INode inode = namesystem.getFSDirectory().getINode(
-          moverId, FSDirectory.DirOp.READ);
-      if (inode != null) {
-        ret = true;
-      }
-    } catch (IOException e) {
-      LOG.info("StoragePolicySatisfier is enabled as no Mover ID file found.");
-      ret = false;
-    }
-    return ret;
+    String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
+    return namesystem.isFileOpenedForWrite(moverId);
   }
 
   @Override
@@ -177,7 +165,8 @@ public class StoragePolicySatisfier implements Runnable {
         this.storageMovementsMonitor.stop();
         LOG.error(
             "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-                + HdfsServerConstants.MOVER_ID_PATH.toString() + " exists");
+                + HdfsServerConstants.MOVER_ID_PATH.toString()
+                + " been opened. Maybe a Mover instance is running!");
         return;
       }
     }
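
The heart of this change is swapping an existence check for a lease check: SPS now treats the Mover as running only while some client still holds the write lease on the Mover ID file, so a stale ID file left behind by a crashed Mover no longer keeps SPS disabled. A toy version of that distinction, with a stand-in Namesystem rather than the real interface:

    import java.util.HashMap;
    import java.util.Map;

    public class MoverCheckSketch {

      // Toy namesystem: tracks which paths exist and which still have an active writer.
      static class Namesystem {
        private final Map<String, Boolean> files = new HashMap<>();
        void create(String path, boolean writerStillOpen) { files.put(path, writerStillOpen); }
        boolean exists(String path) { return files.containsKey(path); }
        boolean isFileOpenedForWrite(String path) { return files.getOrDefault(path, false); }
      }

      static boolean checkIfMoverRunning(Namesystem ns, String moverIdPath) {
        // Old behaviour: return ns.exists(moverIdPath);
        // which kept SPS disabled even after the Mover had crashed.
        return ns.isFileOpenedForWrite(moverIdPath);
      }

      public static void main(String[] args) {
        Namesystem ns = new Namesystem();
        ns.create("/system/mover.id", false);    // Mover exited ungracefully; lease released
        System.out.println(checkIfMoverRunning(ns, "/system/mover.id"));  // false: SPS may start
      }
    }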

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index de73e8b..2a33455 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -97,29 +98,33 @@ public class TestStoragePolicySatisfier {
 
     try {
       createCluster();
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), COLD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
-      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
-          storagesPerDatanode, capacity, hdfsCluster);
-
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
-
-      hdfsCluster.triggerHeartbeats();
-      // Wait till namenode notified about the block location details
-      DFSTestUtil.waitExpectedStorageType(
-          file, StorageType.ARCHIVE, 3, 30000, dfs);
+      doTestWhenStoragePolicySetToCOLD();
     } finally {
       shutdownCluster();
     }
   }
 
+  private void doTestWhenStoragePolicySetToCOLD() throws Exception {
+    // Change policy to COLD
+    dfs.setStoragePolicy(new Path(file), COLD);
+    FSNamesystem namesystem = hdfsCluster.getNamesystem();
+    INode inode = namesystem.getFSDirectory().getINode(file);
+
+    StorageType[][] newtypes =
+        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+    startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+        storagesPerDatanode, capacity, hdfsCluster);
+
+    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+
+    hdfsCluster.triggerHeartbeats();
+    // Wait till namenode notified about the block location details
+    DFSTestUtil.waitExpectedStorageType(
+        file, StorageType.ARCHIVE, 3, 30000, dfs);
+  }
+
   @Test(timeout = 300000)
   public void testWhenStoragePolicySetToALLSSD()
       throws Exception {
@@ -500,19 +505,78 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
+      throws Exception {
+    boolean running;
+    FSDataOutputStream out = null;
+    try {
+      createCluster();
+      // Stop SPS
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertFalse("SPS should stopped as configured.", running);
+
+      // Simulate the case by creating MOVER_ID file
+      out = hdfsCluster.getFileSystem().create(
+          HdfsServerConstants.MOVER_ID_PATH);
+
+      // Restart SPS
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertFalse("SPS should not be able to run as file "
+          + HdfsServerConstants.MOVER_ID_PATH + " is being hold.", running);
+
+      // Simulate the Mover exiting
+      out.close();
+      out = null;
+      hdfsCluster.getFileSystem().delete(
+          HdfsServerConstants.MOVER_ID_PATH, true);
+
+      // Restart SPS again
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertTrue("SPS should be running as "
+          + "Mover already exited", running);
+
+      // Check functionality after SPS restart
+      doTestWhenStoragePolicySetToCOLD();
+    } catch (ReconfigurationException e) {
+      throw new IOException("Exception when reconfigure "
+          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, e);
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify that SPS should be able to start when the Mover ID file
+   * is not being held by a Mover. This can be the case when the Mover exits
+   * ungracefully without deleting the ID file from HDFS.
+   */
+  @Test(timeout = 300000)
+  public void testWhenMoverExitsWithoutDeleteMoverIDFile()
       throws IOException {
     try {
       createCluster();
-      // Simulate Mover by creating MOVER_ID file
+      // Simulate the case by creating MOVER_ID file
       DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
           HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
       hdfsCluster.restartNameNode(true);
       boolean running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
-      Assert.assertFalse("SPS should not start "
-          + "when a Mover instance is running", running);
+      Assert.assertTrue("SPS should be running as "
+          + "no Mover really running", running);
     } finally {
-      shutdownCluster();
+      hdfsCluster.shutdown();
     }
   }
 

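For reference, a minimal sketch (not part of this patch) of the activate/deactivate sequence the two tests above exercise. It assumes the same package and test-only visibility the test relies on for NameNode#reconfigurePropertyImpl, and uses only calls that appear in the patch (DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY and DFSClient#isStoragePolicySatisfierRunning); the class and method names are illustrative.

package org.apache.hadoop.hdfs.server.namenode;  // assumed, so reconfigurePropertyImpl is accessible

import java.io.IOException;

import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

class SpsToggleSketch {
  /**
   * Activate or deactivate the SPS on a running MiniDFSCluster and report
   * whether the satisfier says it is running afterwards.
   */
  static boolean toggleSps(MiniDFSCluster cluster, boolean activate)
      throws IOException, ReconfigurationException {
    cluster.getNameNode().reconfigurePropertyImpl(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
        Boolean.toString(activate));
    return cluster.getFileSystem().getClient()
        .isStoragePolicySatisfierRunning();
  }
}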



[48/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

Posted by ra...@apache.org.
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e17983d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e17983d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e17983d

Branch: refs/heads/HDFS-10285
Commit: 1e17983d5d4c4a3bc01e16c266b7e5e8434124cc
Parents: ac4d9b0
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:33 2017 +0530

----------------------------------------------------------------------
 .../datanode/BlockStorageMovementTracker.java       | 16 ++++++++++++----
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e17983d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
             moverTaskFutures.wait(2000);
           }
         } catch (InterruptedException ignore) {
-          // ignore
+          // Restore the interrupt status of this thread.
+          Thread.currentThread().interrupt();
         }
       }
       try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
             synchronized (moverTaskFutures) {
               moverTaskFutures.remove(trackId);
             }
-            // handle completed or inprogress blocks movements per trackId.
-            blksMovementsStatusHandler.handle(resultPerTrackIdList);
+            if (running) {
+              // handle completed or inprogress blocks movements per trackId.
+              blksMovementsStatusHandler.handle(resultPerTrackIdList);
+            }
             movementResults.remove(trackId);
           }
         }
-      } catch (ExecutionException | InterruptedException e) {
+      } catch (InterruptedException e) {
+        if (running) {
+          LOG.error("Exception while moving block replica to target storage"
+              + " type", e);
+        }
+      } catch (ExecutionException e) {
         // TODO: Do we need failure retries and implement the same if required.
         LOG.error("Exception while moving block replica to target storage type",
             e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e17983d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
    * thread.
    */
   void stop() {
-    movementTrackerThread.interrupt();
     movementTracker.stopTracking();
+    movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
     try {
       movementTrackerThread.join(3000);
-    } catch (InterruptedException ie) {
+    } catch (InterruptedException ignore) {
+      // ignore
     }
   }
 

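The effect of the re-ordering above, shown as a stand-alone sketch rather than the Hadoop code: clear the running flag first and interrupt second, so the tracker thread sees running == false as soon as it wakes up, skips any remaining result handling, and exits promptly. The class below is purely illustrative.

class TrackerStopOrderSketch implements Runnable {
  private volatile boolean running = true;
  private final Thread trackerThread = new Thread(this, "tracker");

  void start() {
    trackerThread.start();
  }

  /** Mirrors the new stop(): signal the loop first, then wake the thread. */
  void stop() {
    running = false;            // analogous to movementTracker.stopTracking()
    trackerThread.interrupt();  // analogous to movementTrackerThread.interrupt()
  }

  @Override
  public void run() {
    while (running) {
      try {
        Thread.sleep(2000);     // stands in for waiting on mover task futures
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // restore the interrupt status
      }
      if (running) {
        // handle completed or in-progress movements only while still running
      }
    }
  }
}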



[14/50] [abbrv] hadoop git commit: HDFS-10801. [SPS]: Protocol buffer changes for sending storage movement commands from NN to DN. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-10801. [SPS]: Protocol buffer changes for sending storage movement commands from NN to DN. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b7b964e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b7b964e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b7b964e

Branch: refs/heads/HDFS-10285
Commit: 1b7b964e7c1524b15ed673b2bf878da20f6f103a
Parents: 567977b
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Tue Oct 11 11:44:06 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:08 2017 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 90 ++++++++++++++++++++
 .../blockmanagement/DatanodeDescriptor.java     | 15 ----
 .../server/blockmanagement/DatanodeManager.java | 13 +++
 .../hdfs/server/datanode/BPOfferService.java    |  8 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  8 +-
 .../datanode/StoragePolicySatisfyWorker.java    | 22 ++++-
 .../protocol/BlockStorageMovementCommand.java   | 71 ++++++++++++++-
 .../hdfs/server/protocol/DatanodeProtocol.java  |  1 +
 .../src/main/proto/DatanodeProtocol.proto       | 22 +++++
 .../namenode/TestStoragePolicySatisfier.java    | 86 +++++++++++--------
 10 files changed, 276 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 6539d32..83f3454 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -52,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -94,6 +96,8 @@ import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.Block
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -465,6 +469,8 @@ public class PBHelper {
       return PBHelper.convert(proto.getBlkIdCmd());
     case BlockECReconstructionCommand:
       return PBHelper.convert(proto.getBlkECReconstructionCmd());
+    case BlockStorageMovementCommand:
+      return PBHelper.convert(proto.getBlkStorageMovementCmd());
     default:
       return null;
     }
@@ -599,6 +605,11 @@ public class PBHelper {
           .setBlkECReconstructionCmd(
               convert((BlockECReconstructionCommand) datanodeCommand));
       break;
+    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
+      builder.setCmdType(DatanodeCommandProto.Type.BlockStorageMovementCommand)
+          .setBlkStorageMovementCmd(
+              convert((BlockStorageMovementCommand) datanodeCommand));
+      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -1096,4 +1107,83 @@ public class PBHelper {
         DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION,
         blkECReconstructionInfos);
   }
+
+  private static BlockStorageMovementCommandProto convert(
+      BlockStorageMovementCommand blkStorageMovementCmd) {
+    BlockStorageMovementCommandProto.Builder builder =
+        BlockStorageMovementCommandProto.newBuilder();
+
+    builder.setTrackID(blkStorageMovementCmd.getTrackID());
+    builder.setBlockPoolId(blkStorageMovementCmd.getBlockPoolId());
+    Collection<BlockMovingInfo> blockMovingInfos = blkStorageMovementCmd
+        .getBlockMovingTasks();
+    for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
+      builder.addBlockStorageMovement(
+          convertBlockMovingInfo(blkMovingInfo));
+    }
+    return builder.build();
+  }
+
+  private static BlockStorageMovementProto convertBlockMovingInfo(
+      BlockMovingInfo blkMovingInfo) {
+    BlockStorageMovementProto.Builder builder = BlockStorageMovementProto
+        .newBuilder();
+    builder.setBlock(PBHelperClient.convert(blkMovingInfo.getBlock()));
+
+    DatanodeInfo[] sourceDnInfos = blkMovingInfo.getSources();
+    builder.setSourceDnInfos(convertToDnInfosProto(sourceDnInfos));
+
+    DatanodeInfo[] targetDnInfos = blkMovingInfo.getTargets();
+    builder.setTargetDnInfos(convertToDnInfosProto(targetDnInfos));
+
+    StorageType[] sourceStorageTypes = blkMovingInfo.getSourceStorageTypes();
+    builder.setSourceStorageTypes(convertStorageTypesProto(sourceStorageTypes));
+
+    StorageType[] targetStorageTypes = blkMovingInfo.getTargetStorageTypes();
+    builder.setTargetStorageTypes(convertStorageTypesProto(targetStorageTypes));
+
+    return builder.build();
+  }
+
+  private static DatanodeCommand convert(
+      BlockStorageMovementCommandProto blkStorageMovementCmdProto) {
+    Collection<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+    List<BlockStorageMovementProto> blkSPSatisfyList =
+        blkStorageMovementCmdProto.getBlockStorageMovementList();
+    for (BlockStorageMovementProto blkSPSatisfy : blkSPSatisfyList) {
+      blockMovingInfos.add(convertBlockMovingInfo(blkSPSatisfy));
+    }
+    return new BlockStorageMovementCommand(
+        DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT,
+        blkStorageMovementCmdProto.getTrackID(),
+        blkStorageMovementCmdProto.getBlockPoolId(), blockMovingInfos);
+  }
+
+  private static BlockMovingInfo convertBlockMovingInfo(
+      BlockStorageMovementProto blockStoragePolicySatisfyProto) {
+    BlockProto blockProto = blockStoragePolicySatisfyProto.getBlock();
+    Block block = PBHelperClient.convert(blockProto);
+
+    DatanodeInfosProto sourceDnInfosProto = blockStoragePolicySatisfyProto
+        .getSourceDnInfos();
+    DatanodeInfo[] sourceDnInfos = PBHelperClient.convert(sourceDnInfosProto);
+
+    DatanodeInfosProto targetDnInfosProto = blockStoragePolicySatisfyProto
+        .getTargetDnInfos();
+    DatanodeInfo[] targetDnInfos = PBHelperClient.convert(targetDnInfosProto);
+
+    StorageTypesProto srcStorageTypesProto = blockStoragePolicySatisfyProto
+        .getSourceStorageTypes();
+    StorageType[] srcStorageTypes = PBHelperClient.convertStorageTypes(
+        srcStorageTypesProto.getStorageTypesList(),
+        srcStorageTypesProto.getStorageTypesList().size());
+
+    StorageTypesProto targetStorageTypesProto = blockStoragePolicySatisfyProto
+        .getTargetStorageTypes();
+    StorageType[] targetStorageTypes = PBHelperClient.convertStorageTypes(
+        targetStorageTypesProto.getStorageTypesList(),
+        targetStorageTypesProto.getStorageTypesList().size());
+    return new BlockMovingInfo(block, sourceDnInfos, targetDnInfos,
+        srcStorageTypes, targetStorageTypes);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 46a45ee..1db002b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -1039,19 +1039,4 @@ public class DatanodeDescriptor extends DatanodeInfo {
   public List<BlockMovingInfo> getBlocksToMoveStorages() {
     return storageMovementBlocks.poll();
   }
-
-  // TODO: we will remove this method once DN side handling integrated. We can
-  // convert the test to check real block movements instead of this ds.
-  @VisibleForTesting
-  public List<BlockMovingInfo> getStorageMovementPendingItems() {
-    List<BlockMovingInfo> flatList = new ArrayList<>();
-    Iterator<List<BlockMovingInfo>> iterator = storageMovementBlocks
-        .iterator();
-    while (iterator.hasNext()) {
-      List<BlockMovingInfo> next = iterator.next();
-      flatList.addAll(next);
-    }
-    return flatList;
-  }
 }
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 13c730d..6a5cfbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1737,6 +1737,19 @@ public class DatanodeManager {
       }
     }
 
+    // check pending block storage movement tasks
+    List<BlockMovingInfo> pendingBlockMovementList = nodeinfo
+        .getBlocksToMoveStorages();
+    if (pendingBlockMovementList != null) {
+      // TODO: trackID is used to track the block movement sends to coordinator
+      // datanode. Need to implement tracking logic. Temporarily, using a
+      // constant value -1.
+      long trackID = -1;
+      cmds.add(new BlockStorageMovementCommand(
+          DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT, trackID, blockPoolId,
+          pendingBlockMovementList));
+    }
+
     if (!cmds.isEmpty()) {
       return cmds.toArray(new DatanodeCommand[cmds.size()]);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index dbf7c8d..c77fe2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -781,6 +781,13 @@ class BPOfferService {
           ((BlockECReconstructionCommand) cmd).getECTasks();
       dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
       break;
+    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
+      LOG.info("DatanodeCommand action: DNA_BLOCK_STORAGE_MOVEMENT");
+      BlockStorageMovementCommand blkSPSCmd = (BlockStorageMovementCommand) cmd;
+      dn.getStoragePolicySatisfyWorker().processBlockMovingTasks(
+          blkSPSCmd.getTrackID(), blkSPSCmd.getBlockPoolId(),
+          blkSPSCmd.getBlockMovingTasks());
+      break;
     default:
       LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
     }
@@ -811,6 +818,7 @@ class BPOfferService {
     case DatanodeProtocol.DNA_CACHE:
     case DatanodeProtocol.DNA_UNCACHE:
     case DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION:
+    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
       LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
       break;
     default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1a85b46..5a8ad68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -384,6 +384,7 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
   private BlockRecoveryWorker blockRecoveryWorker;
   private ErasureCodingWorker ecWorker;
+  private StoragePolicySatisfyWorker storagePolicySatisfyWorker;
   private final Tracer tracer;
   private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
@@ -1420,6 +1421,8 @@ public class DataNode extends ReconfigurableBase
 
     ecWorker = new ErasureCodingWorker(getConf(), this);
     blockRecoveryWorker = new BlockRecoveryWorker(this);
+    storagePolicySatisfyWorker =
+        new StoragePolicySatisfyWorker(getConf(), this);
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(getConf());
@@ -3607,4 +3610,7 @@ public class DataNode extends ReconfigurableBase
     }
     return volumeInfoList;
   }
-}
+
+  StoragePolicySatisfyWorker getStoragePolicySatisfyWorker() {
+    return storagePolicySatisfyWorker;
+  }}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index fa408f6..2c99963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -28,6 +28,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.Socket;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.Callable;
@@ -126,8 +127,25 @@ public class StoragePolicySatisfyWorker {
     return moverThreadPool;
   }
 
+  /**
+   * Handles the given set of block movement tasks. This will iterate over the
+   * block movement list and submit each block movement task asynchronously in a
+   * separate thread. Each task will move the block replica to the target node
+   * and wait for the completion.
+   *
+   * TODO: Presently this function is a blocking call; this has to be refined by
+   * moving the tracking logic to another tracker thread. HDFS-10884 jira
+   * addresses the same.
+   *
+   * @param trackID
+   *          unique tracking identifier
+   * @param blockPoolID
+   *          block pool ID
+   * @param blockMovingInfos
+   *          list of blocks to be moved
+   */
   public void processBlockMovingTasks(long trackID, String blockPoolID,
-      List<BlockMovingInfo> blockMovingInfos) {
+      Collection<BlockMovingInfo> blockMovingInfos) {
     Future<Void> moveCallable = null;
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
       assert blkMovingInfo
@@ -143,8 +161,6 @@ public class StoragePolicySatisfyWorker {
       }
     }
 
-    // TODO: Presently this function act as a blocking call, this has to be
-    // refined by moving the tracking logic to another tracker thread.
     for (int i = 0; i < moverTaskFutures.size(); i++) {
       try {
         moveCallable = moverExecutorCompletionService.take();

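The TODO in the javadoc above notes that processBlockMovingTasks is currently a blocking call: it submits one task per block move and then drains the completion service before returning. Stripped of the Hadoop specifics, the pattern is roughly the following sketch; the pool size and the task payload are placeholders, not the actual implementation.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class BlockingMoveSketch {
  private final ExecutorService pool = Executors.newFixedThreadPool(4);
  private final CompletionService<Void> completion =
      new ExecutorCompletionService<>(pool);

  /** Submit every move, then block until each one has finished. */
  void processAll(List<Callable<Void>> moveTasks) throws InterruptedException {
    for (Callable<Void> task : moveTasks) {
      completion.submit(task);       // one asynchronous task per block move
    }
    for (int i = 0; i < moveTasks.size(); i++) {
      completion.take();             // blocks until some submitted move completes
    }
  }

  void shutdown() {
    pool.shutdown();
  }
}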
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
index c1ab800..7c97f1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.protocol;
 
 import java.util.Arrays;
+import java.util.Collection;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -33,12 +34,60 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  * {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker}
  * service. After the block movement this DataNode sends response back to the
  * NameNode about the movement status.
+ *
+ * The coordinator datanode will use the 'trackId' identifier to coordinate the
+ * block movement of the given set of blocks. TrackId is a unique identifier
+ * that represents a group of blocks. The Namenode generates this unique value
+ * and sends it to the coordinator datanode along with the
+ * BlockStorageMovementCommand. The Datanode monitors the completion of the
+ * block movements grouped under this trackId and notifies the Namenode about
+ * the completion status.
  */
 public class BlockStorageMovementCommand extends DatanodeCommand {
-  // TODO: constructor needs to be refined based on the block movement data
-  // structure.
-  BlockStorageMovementCommand(int action) {
+  private final long trackID;
+  private final String blockPoolId;
+  private final Collection<BlockMovingInfo> blockMovingTasks;
+
+  /**
+   * Block storage movement command constructor.
+   *
+   * @param action
+   *          protocol specific action
+   * @param trackID
+   *          unique identifier to monitor the given set of block movements
+   * @param blockPoolId
+   *          block pool ID
+   * @param blockMovingInfos
+   *          block to storage info that will be used for movement
+   */
+  public BlockStorageMovementCommand(int action, long trackID,
+      String blockPoolId, Collection<BlockMovingInfo> blockMovingInfos) {
     super(action);
+    this.trackID = trackID;
+    this.blockPoolId = blockPoolId;
+    this.blockMovingTasks = blockMovingInfos;
+  }
+
+  /**
+   * Returns trackID, which will be used to monitor the block movement assigned
+   * to this coordinator datanode.
+   */
+  public long getTrackID() {
+    return trackID;
+  }
+
+  /**
+   * Returns block pool ID.
+   */
+  public String getBlockPoolId() {
+    return blockPoolId;
+  }
+
+  /**
+   * Returns the list of blocks to be moved.
+   */
+  public Collection<BlockMovingInfo> getBlockMovingTasks() {
+    return blockMovingTasks;
   }
 
   /**
@@ -47,10 +96,24 @@ public class BlockStorageMovementCommand extends DatanodeCommand {
   public static class BlockMovingInfo {
     private Block blk;
     private DatanodeInfo[] sourceNodes;
-    private StorageType[] sourceStorageTypes;
     private DatanodeInfo[] targetNodes;
+    private StorageType[] sourceStorageTypes;
     private StorageType[] targetStorageTypes;
 
+    /**
+     * Block to storage info constructor.
+     *
+     * @param block
+     *          block
+     * @param sourceDnInfos
+     *          nodes that can be the sources of a block move
+     * @param targetDnInfos
+     *          target datanode info
+     * @param srcStorageTypes
+     *          type of source storage media
+     * @param targetStorageTypes
+     *          type of destination storage media
+     */
     public BlockMovingInfo(Block block,
         DatanodeInfo[] sourceDnInfos, DatanodeInfo[] targetDnInfos,
         StorageType[] srcStorageTypes, StorageType[] targetStorageTypes) {

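To make the new command fields concrete, a hedged sketch of a consumer that uses only the accessors introduced by this patch (getTrackID, getBlockPoolId, getBlockMovingTasks and BlockMovingInfo#getBlock); the surrounding class and the println are illustrative, not part of the patch.

import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;

class MovementCommandReaderSketch {
  /** Print the block moves carried by one BlockStorageMovementCommand. */
  static void describe(BlockStorageMovementCommand cmd) {
    long trackId = cmd.getTrackID();            // NN-generated id for this batch
    String blockPoolId = cmd.getBlockPoolId();  // pool the blocks belong to
    for (BlockMovingInfo info : cmd.getBlockMovingTasks()) {
      System.out.println("trackId=" + trackId + " pool=" + blockPoolId
          + " block=" + info.getBlock());
    }
  }
}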
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 1f55100..283f367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -79,6 +79,7 @@ public interface DatanodeProtocol {
   final static int DNA_CACHE = 9;      // cache blocks
   final static int DNA_UNCACHE = 10;   // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
+  final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
 
   /** 
    * Register Datanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index bf0df5b..8e19809 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -60,6 +60,7 @@ message DatanodeCommandProto {
     NullDatanodeCommand = 7;
     BlockIdCommand = 8;
     BlockECReconstructionCommand = 9;
+    BlockStorageMovementCommand = 10;
   }
 
   required Type cmdType = 1;    // Type of the command
@@ -74,6 +75,7 @@ message DatanodeCommandProto {
   optional RegisterCommandProto registerCmd = 7;
   optional BlockIdCommandProto blkIdCmd = 8;
   optional BlockECReconstructionCommandProto blkECReconstructionCmd = 9;
+  optional BlockStorageMovementCommandProto blkStorageMovementCmd = 10;
 }
 
 /**
@@ -154,6 +156,26 @@ message BlockECReconstructionCommandProto {
   repeated BlockECReconstructionInfoProto blockECReconstructioninfo = 1;
 }
 
+ /**
+ * Block storage movement command
+ */
+message BlockStorageMovementCommandProto {
+  required uint64 trackID = 1;
+  required string blockPoolId = 2;
+  repeated BlockStorageMovementProto blockStorageMovement = 3;
+}
+
+/**
+ * Block storage movement information
+ */
+message BlockStorageMovementProto {
+  required BlockProto block = 1;
+  required DatanodeInfosProto sourceDnInfos = 2;
+  required DatanodeInfosProto targetDnInfos = 3;
+  required StorageTypesProto sourceStorageTypes = 4;
+  required StorageTypesProto targetStorageTypes = 5;
+}
+
 /**
  * registration - Information of the datanode registering with the namenode
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b7b964e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index b61814d..37664b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -29,8 +26,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -74,9 +70,6 @@ public class TestStoragePolicySatisfier {
     try {
       // Change policy to ALL_SSD
       distributedFS.setStoragePolicy(new Path(file), "COLD");
-      Set<DatanodeDescriptor> previousNodes =
-          hdfsCluster.getNameNode().getNamesystem().getBlockManager()
-              .getDatanodeManager().getDatanodes();
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -91,8 +84,8 @@ public class TestStoragePolicySatisfier {
 
       hdfsCluster.triggerHeartbeats();
       // Wait till namenode notified about the block location details
-      waitExpectedStorageType(StorageType.ARCHIVE, distributedFS, previousNodes,
-          6, 30000);
+      waitExpectedStorageType(file, StorageType.ARCHIVE, distributedFS, 3,
+          30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -104,9 +97,6 @@ public class TestStoragePolicySatisfier {
     try {
       // Change policy to ALL_SSD
       distributedFS.setStoragePolicy(new Path(file), "ALL_SSD");
-      Set<DatanodeDescriptor> previousNodes =
-          hdfsCluster.getNameNode().getNamesystem().getBlockManager()
-              .getDatanodeManager().getDatanodes();
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -123,8 +113,34 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(StorageType.SSD, distributedFS, previousNodes, 6,
-          30000);
+      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 3, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToONESSD()
+      throws Exception {
+    try {
+      // Change policy to ONE_SSD
+      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster. Adding SSD based
+      // datanodes.
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified that the block has
+      // to move to SSD areas
+      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -174,35 +190,31 @@ public class TestStoragePolicySatisfier {
     return cluster;
   }
 
-  // TODO: this assertion can be changed to end to end based assertion later
-  // when DN side processing work integrated to this work.
-  private void waitExpectedStorageType(final StorageType expectedStorageType,
-      final DistributedFileSystem dfs,
-      final Set<DatanodeDescriptor> previousNodes, int expectedArchiveCount,
-      int timeout) throws Exception {
+  // Check whether the Block movement has been successfully completed to satisfy
+  // the storage policy for the given file.
+  private void waitExpectedStorageType(final String fileName,
+      final StorageType expectedStorageType, final DistributedFileSystem dfs,
+      int expectedStorageCount, int timeout) throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        Iterator<DatanodeDescriptor> iterator = previousNodes.iterator();
-        int archiveCount = 0;
-        while (iterator.hasNext()) {
-          DatanodeDescriptor dn = iterator.next();
-          List<BlockMovingInfo> pendingItemsToMove =
-              dn.getStorageMovementPendingItems();
-          for (BlockMovingInfo blkInfoToMoveStorage : pendingItemsToMove) {
-            StorageType[] targetStorageTypes =
-                blkInfoToMoveStorage.getTargetStorageTypes();
-            for (StorageType storageType : targetStorageTypes) {
-              if (storageType == expectedStorageType) {
-                archiveCount++;
-              }
-            }
+        LocatedBlock lb = null;
+        try {
+          lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+        int actualStorageCount = 0;
+        for (StorageType storageType : lb.getStorageTypes()) {
+          if (expectedStorageType == storageType) {
+            actualStorageCount++;
           }
         }
         LOG.info(
             expectedStorageType + " replica count, expected={} and actual={}",
-            expectedArchiveCount, archiveCount);
-        return expectedArchiveCount == archiveCount;
+            expectedStorageCount, actualStorageCount);
+        return expectedStorageCount == actualStorageCount;
       }
     }, 100, timeout);
   }

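The rewritten helper above polls the located blocks with GenericTestUtils.waitFor instead of sleeping for a fixed time. In isolation the idiom looks like the sketch below; the success condition is a placeholder, while the 100 ms poll interval and 30 s timeout match the helper.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

class WaitForSketch {
  /** Poll every 100 ms, for at most 30 s, until the counter reaches 3. */
  static void waitUntilSatisfied(final AtomicInteger replicaCount)
      throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return replicaCount.get() >= 3;  // placeholder success condition
      }
    }, 100, 30000);
  }
}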



[26/50] [abbrv] hadoop git commit: HDFS-11289. [SPS]: Make SPS movement monitor timeouts configurable. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11289. [SPS]: Make SPS movement monitor timeouts configurable. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40ced2b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40ced2b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40ced2b0

Branch: refs/heads/HDFS-10285
Commit: 40ced2b020efb0e7152cfebb99f8da1761d091a0
Parents: 3b1c262
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Mon Jan 9 19:07:43 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:45 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  9 ++++++++
 .../server/blockmanagement/BlockManager.java    |  4 ++--
 .../BlockStorageMovementAttemptedItems.java     | 10 ++++-----
 .../server/namenode/StoragePolicySatisfier.java | 15 ++++++++-----
 .../src/main/resources/hdfs-default.xml         | 23 ++++++++++++++++++++
 5 files changed, 49 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40ced2b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1c8934f..bf93591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -540,10 +540,19 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.mover.max-no-move-interval";
   public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
+  // SPS related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
       "dfs.storage.policy.satisfier.activate";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
       true;
+  public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
+      "dfs.storage.policy.satisfier.recheck.timeout.millis";
+  public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
+      5 * 60 * 1000;
+  public static final String DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
+      "dfs.storage.policy.satisfier.self.retry.timeout.millis";
+  public static final int DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
+      30 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int     DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40ced2b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bfebde4..fed1506 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -465,8 +465,8 @@ public class BlockManager implements BlockStatsMXBean {
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
     if (storagePolicyEnabled && spsEnabled) {
-      sps = new StoragePolicySatisfier(namesystem,
-          storageMovementNeeded, this);
+      sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
+          conf);
     } else {
       sps = null;
       LOG.warn(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40ced2b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index ce97075..042aca3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -64,14 +64,14 @@ public class BlockStorageMovementAttemptedItems {
   // It might take anywhere between 5 to 10 minutes before
   // a request is timed out.
   //
-  private long checkTimeout = 5 * 60 * 1000; // minimum value
+  private long minCheckTimeout = 5 * 60 * 1000; // minimum value
   private BlockStorageMovementNeeded blockStorageMovementNeeded;
 
-  public BlockStorageMovementAttemptedItems(long timeoutPeriod,
+  public BlockStorageMovementAttemptedItems(long recheckTimeout,
       long selfRetryTimeout,
       BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) {
-    if (timeoutPeriod > 0) {
-      this.checkTimeout = Math.min(checkTimeout, timeoutPeriod);
+    if (recheckTimeout > 0) {
+      this.minCheckTimeout = Math.min(minCheckTimeout, recheckTimeout);
     }
 
     this.selfRetryTimeout = selfRetryTimeout;
@@ -196,7 +196,7 @@ public class BlockStorageMovementAttemptedItems {
         try {
           blockStorageMovementResultCheck();
           blocksStorageMovementUnReportedItemsCheck();
-          Thread.sleep(checkTimeout);
+          Thread.sleep(minCheckTimeout);
         } catch (InterruptedException ie) {
           LOG.info("BlocksStorageMovementAttemptResultMonitor thread "
               + "is interrupted.", ie);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40ced2b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a854bd7..ee59617 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -27,7 +27,9 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -79,15 +81,18 @@ public class StoragePolicySatisfier implements Runnable {
 
   public StoragePolicySatisfier(final Namesystem namesystem,
       final BlockStorageMovementNeeded storageMovementNeeded,
-      final BlockManager blkManager) {
+      final BlockManager blkManager, Configuration conf) {
     this.namesystem = namesystem;
     this.storageMovementNeeded = storageMovementNeeded;
     this.blockManager = blkManager;
-    // TODO: below selfRetryTimeout and checkTimeout can be configurable later
-    // Now, the default values of selfRetryTimeout and checkTimeout are 30mins
-    // and 5mins respectively
     this.storageMovementsMonitor = new BlockStorageMovementAttemptedItems(
-        5 * 60 * 1000, 30 * 60 * 1000, storageMovementNeeded);
+        conf.getLong(
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT),
+        conf.getLong(
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT),
+        storageMovementNeeded);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40ced2b0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 86df006..356540c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4291,6 +4291,29 @@
 </property>
 
 <property>
+  <name>dfs.storage.policy.satisfier.recheck.timeout.millis</name>
+  <value>300000</value>
+  <description>
+    Blocks storage movements monitor re-check interval in milliseconds.
+    On each check it verifies whether any block storage movement results have arrived from the DNs,
+    and whether any of a file's block movements have not been reported at all within
+    dfs.storage.policy.satisfier.self.retry.timeout.
+    The default value is 5 * 60 * 1000 (5 mins)
+  </description>
+</property>
+
+<property>
+  <name>dfs.storage.policy.satisfier.self.retry.timeout.millis</name>
+  <value>1800000</value>
+  <description>
+    If any of a file's block movements are not reported at all by the coordinator datanode,
+    then after this timeout (in milliseconds) the item is added back to the movement-needed list
+    at the namenode and its block movements are retried.
+    The default value is 30 * 60 * 1000 (30 mins)
+  </description>
+</property>
+
+<property>
   <name>dfs.pipeline.ecn</name>
   <value>false</value>
   <description>


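Since the StoragePolicySatisfier constructor now reads both keys from the Configuration (see the change above), a test or deployment can shorten the monitor timeouts instead of relying on the 5-minute and 30-minute defaults. A hedged sketch, with arbitrary example values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

class SpsTimeoutConfigSketch {
  static Configuration withShortTimeouts() {
    Configuration conf = new HdfsConfiguration();
    // Re-check the attempted-items monitor every 30 seconds instead of 5 minutes.
    conf.setLong(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
        30 * 1000L);
    // Re-queue never-reported items after 2 minutes instead of 30 minutes.
    conf.setLong(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
        2 * 60 * 1000L);
    return conf;
  }
}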


[24/50] [abbrv] hadoop git commit: HDFS-11248: [SPS]: Handle partial block location movements. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-11248: [SPS]: Handle partial block location movements. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a09c7d43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a09c7d43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a09c7d43

Branch: refs/heads/HDFS-10285
Commit: a09c7d43d93a7cc43513104ef6c548fa669586d0
Parents: f18deae
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Dec 28 23:21:07 2016 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:36 2017 +0530

----------------------------------------------------------------------
 .../datanode/BlockStorageMovementTracker.java   |  15 --
 .../datanode/StoragePolicySatisfyWorker.java    |  15 +-
 .../BlockStorageMovementAttemptedItems.java     | 206 +++++++++++++-----
 .../server/namenode/StoragePolicySatisfier.java | 215 +++++++++++++------
 .../TestBlockStorageMovementAttemptedItems.java | 101 ++++++++-
 .../namenode/TestStoragePolicySatisfier.java    |  63 +++++-
 6 files changed, 454 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 2de88fc..bd35b09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -28,7 +28,6 @@ import java.util.concurrent.Future;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
-import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -109,20 +108,6 @@ public class BlockStorageMovementTracker implements Runnable {
     }
   }
 
-  /**
-   * Mark as block movement failure for the given trackId and blockId.
-   *
-   * @param trackId tracking id
-   * @param blockId block id
-   */
-  void markBlockMovementFailure(long trackId, long blockId) {
-    LOG.debug("Mark as block movement failure for the given "
-        + "trackId:{} and blockId:{}", trackId, blockId);
-    BlockMovementResult result = new BlockMovementResult(trackId, blockId, null,
-        BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE);
-    addMovementResultToTrackIdList(result);
-  }
-
   private List<BlockMovementResult> addMovementResultToTrackIdList(
       BlockMovementResult result) {
     long trackId = result.getTrackId();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 19f3fe2..10adbfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -154,18 +154,9 @@ public class StoragePolicySatisfyWorker {
       Collection<BlockMovingInfo> blockMovingInfos) {
     LOG.debug("Received BlockMovingTasks {}", blockMovingInfos);
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
-      // Iterating backwards. This is to ensure that all the block src location
-      // which doesn't have a target node will be marked as failure before
-      // scheduling the block movement to valid target nodes.
-      for (int i = blkMovingInfo.getSources().length - 1; i >= 0; i--) {
-        if (i >= blkMovingInfo.getTargets().length) {
-          // Since there is no target selected for scheduling the block,
-          // just mark this block storage movement as failure. Later, namenode
-          // can take action on this.
-          movementTracker.markBlockMovementFailure(trackID,
-              blkMovingInfo.getBlock().getBlockId());
-          continue;
-        }
+      assert blkMovingInfo.getSources().length == blkMovingInfo
+          .getTargets().length;
+      for (int i = 0; i < blkMovingInfo.getSources().length; i++) {
         DatanodeInfo target = blkMovingInfo.getTargets()[i];
         BlockMovingTask blockMovingTask = new BlockMovingTask(
             trackID, blockPoolID, blkMovingInfo.getBlock(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index bb26082..ce97075 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -43,11 +43,14 @@ import com.google.common.annotations.VisibleForTesting;
  * automatically after timeout. The default timeout would be 30mins.
  */
 public class BlockStorageMovementAttemptedItems {
-  public static final Logger LOG =
+  private static final Logger LOG =
       LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class);
-  // A map holds the items which are already taken for blocks movements
-  // processing and sent to DNs.
-  private final Map<Long, Long> storageMovementAttemptedItems;
+
+  /**
+   * A map holds the items which are already taken for blocks movements
+   * processing and sent to DNs.
+   */
+  private final Map<Long, ItemInfo> storageMovementAttemptedItems;
   private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
   private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
@@ -83,10 +86,16 @@ public class BlockStorageMovementAttemptedItems {
    *
    * @param blockCollectionID
    *          - tracking id / block collection id
+   * @param allBlockLocsAttemptedToSatisfy
+   *          - true if matching target nodes were found to satisfy the storage
+   *          type for all the block locations of the given blockCollectionID
    */
-  public void add(Long blockCollectionID) {
+  public void add(Long blockCollectionID,
+      boolean allBlockLocsAttemptedToSatisfy) {
     synchronized (storageMovementAttemptedItems) {
-      storageMovementAttemptedItems.put(blockCollectionID, monotonicNow());
+      ItemInfo itemInfo = new ItemInfo(monotonicNow(),
+          allBlockLocsAttemptedToSatisfy);
+      storageMovementAttemptedItems.put(blockCollectionID, itemInfo);
     }
   }
 
@@ -121,15 +130,62 @@ public class BlockStorageMovementAttemptedItems {
    */
   public synchronized void stop() {
     monitorRunning = false;
-    timerThread.interrupt();
-    try {
-      timerThread.join(3000);
-    } catch (InterruptedException ie) {
+    if (timerThread != null) {
+      timerThread.interrupt();
+      try {
+        timerThread.join(3000);
+      } catch (InterruptedException ie) {
+      }
     }
     this.clearQueues();
   }
 
   /**
+   * This class contains information about an attempted trackID, such as
+   * (a) the last attempted time stamp and (b) whether all the blocks in the
+   * trackID were attempted and block movement has been scheduled to satisfy
+   * the storage policy. This is used by
+   * {@link BlockStorageMovementAttemptedItems#storageMovementAttemptedItems}.
+   */
+  private final static class ItemInfo {
+    private final long lastAttemptedTimeStamp;
+    private final boolean allBlockLocsAttemptedToSatisfy;
+
+    /**
+     * ItemInfo constructor.
+     *
+     * @param lastAttemptedTimeStamp
+     *          last attempted time stamp
+     * @param allBlockLocsAttemptedToSatisfy
+     *          whether all the blocks in the trackID were attempted and blocks
+     *          movement has been scheduled to satisfy storage policy
+     */
+    private ItemInfo(long lastAttemptedTimeStamp,
+        boolean allBlockLocsAttemptedToSatisfy) {
+      this.lastAttemptedTimeStamp = lastAttemptedTimeStamp;
+      this.allBlockLocsAttemptedToSatisfy = allBlockLocsAttemptedToSatisfy;
+    }
+
+    /**
+     * @return last attempted time stamp.
+     */
+    private long getLastAttemptedTimeStamp() {
+      return lastAttemptedTimeStamp;
+    }
+
+    /**
+     * @return true if all the block locations under the trackID have found
+     *         matching target nodes to satisfy the storage policy; false if
+     *         the trackID needs retries to satisfy the storage policy for
+     *         some of the block locations.
+     */
+    private boolean isAllBlockLocsAttemptedToSatisfy() {
+      return allBlockLocsAttemptedToSatisfy;
+    }
+  }
+
+  /**
    * A monitor class for checking block storage movement result and long waiting
    * items periodically.
    */
@@ -147,76 +203,108 @@ public class BlockStorageMovementAttemptedItems {
         }
       }
     }
+  }
 
-    private void blocksStorageMovementUnReportedItemsCheck() {
-      synchronized (storageMovementAttemptedItems) {
-        Iterator<Entry<Long, Long>> iter =
-            storageMovementAttemptedItems.entrySet().iterator();
-        long now = monotonicNow();
-        while (iter.hasNext()) {
-          Entry<Long, Long> entry = iter.next();
-          if (now > entry.getValue() + selfRetryTimeout) {
-            Long blockCollectionID = entry.getKey();
-            synchronized (storageMovementAttemptedResults) {
-              boolean exist = isExistInResult(blockCollectionID);
-              if (!exist) {
-                blockStorageMovementNeeded.add(blockCollectionID);
-              } else {
-                LOG.info("Blocks storage movement results for the"
-                    + " tracking id : " + blockCollectionID
-                    + " is reported from one of the co-ordinating datanode."
-                    + " So, the result will be processed soon.");
-              }
+  @VisibleForTesting
+  void blocksStorageMovementUnReportedItemsCheck() {
+    synchronized (storageMovementAttemptedItems) {
+      Iterator<Entry<Long, ItemInfo>> iter = storageMovementAttemptedItems
+          .entrySet().iterator();
+      long now = monotonicNow();
+      while (iter.hasNext()) {
+        Entry<Long, ItemInfo> entry = iter.next();
+        ItemInfo itemInfo = entry.getValue();
+        if (now > itemInfo.getLastAttemptedTimeStamp() + selfRetryTimeout) {
+          Long blockCollectionID = entry.getKey();
+          synchronized (storageMovementAttemptedResults) {
+            if (!isExistInResult(blockCollectionID)) {
+              blockStorageMovementNeeded.add(blockCollectionID);
               iter.remove();
+              LOG.info("TrackID: {} becomes timed out and moved to needed "
+                  + "retries queue for next iteration.", blockCollectionID);
+            } else {
+              LOG.info("Blocks storage movement results for the"
+                  + " tracking id : " + blockCollectionID
+                  + " is reported from one of the co-ordinating datanode."
+                  + " So, the result will be processed soon.");
             }
           }
         }
-
       }
+
     }
+  }
 
-    private boolean isExistInResult(Long blockCollectionID) {
-      Iterator<BlocksStorageMovementResult> iter =
-          storageMovementAttemptedResults.iterator();
-      while (iter.hasNext()) {
-        BlocksStorageMovementResult storageMovementAttemptedResult =
-            iter.next();
-        if (storageMovementAttemptedResult.getTrackId() == blockCollectionID) {
-          return true;
-        }
+  private boolean isExistInResult(Long blockCollectionID) {
+    Iterator<BlocksStorageMovementResult> iter = storageMovementAttemptedResults
+        .iterator();
+    while (iter.hasNext()) {
+      BlocksStorageMovementResult storageMovementAttemptedResult = iter.next();
+      if (storageMovementAttemptedResult.getTrackId() == blockCollectionID) {
+        return true;
       }
-      return false;
     }
+    return false;
+  }
 
-    private void blockStorageMovementResultCheck() {
-      synchronized (storageMovementAttemptedResults) {
-        Iterator<BlocksStorageMovementResult> iter =
-            storageMovementAttemptedResults.iterator();
-        while (iter.hasNext()) {
-          BlocksStorageMovementResult storageMovementAttemptedResult =
-              iter.next();
+  @VisibleForTesting
+  void blockStorageMovementResultCheck() {
+    synchronized (storageMovementAttemptedResults) {
+      Iterator<BlocksStorageMovementResult> resultsIter =
+          storageMovementAttemptedResults.iterator();
+      while (resultsIter.hasNext()) {
+        // A trackID needs to be retried in the following cases:
+        // 1) Some or all of the scheduled block movements failed.
+        // 2) All the scheduled block movements succeeded, but the trackID
+        // still has unscheduled block movements. For example, some of the
+        // blocks in the trackID couldn't find any matching target node for
+        // scheduling block movement in the previous SPS iteration.
+        BlocksStorageMovementResult storageMovementAttemptedResult = resultsIter
+            .next();
+        synchronized (storageMovementAttemptedItems) {
           if (storageMovementAttemptedResult
               .getStatus() == BlocksStorageMovementResult.Status.FAILURE) {
             blockStorageMovementNeeded
                 .add(storageMovementAttemptedResult.getTrackId());
-            LOG.warn("Blocks storage movement results for the tracking id : "
-                + storageMovementAttemptedResult.getTrackId()
+            LOG.warn("Blocks storage movement results for the tracking id: {}"
                 + " is reported from co-ordinating datanode, but result"
-                + " status is FAILURE. So, added for retry");
+                + " status is FAILURE. So, added for retry",
+                storageMovementAttemptedResult.getTrackId());
           } else {
-            synchronized (storageMovementAttemptedItems) {
-              storageMovementAttemptedItems
-                  .remove(storageMovementAttemptedResult.getTrackId());
+            ItemInfo itemInfo = storageMovementAttemptedItems
+                .get(storageMovementAttemptedResult.getTrackId());
+
+            // ItemInfo could be null. One case is when the attempted trackID
+            // timed out and was removed from the storageMovementAttemptedItems
+            // list before the block movement result arrived.
+            // TODO: Need to ensure that the trackID is added to the
+            // 'blockStorageMovementNeeded' queue for retries to handle the
+            // following condition: all the block locations under the trackID
+            // were attempted but failed to find matching target nodes to
+            // satisfy the storage policy in the previous SPS iteration.
+            if (itemInfo != null
+                && !itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+              blockStorageMovementNeeded
+                  .add(storageMovementAttemptedResult.getTrackId());
+              LOG.warn("Blocks storage movement is SUCCESS for the track id: {}"
+                  + " reported from co-ordinating datanode. But adding trackID"
+                  + " back to retry queue as some of the blocks couldn't find"
+                  + " matching target nodes in previous SPS iteration.",
+                  storageMovementAttemptedResult.getTrackId());
+            } else {
+              LOG.info("Blocks storage movement is SUCCESS for the track id: {}"
+                  + " reported from co-ordinating datanode. But the trackID "
+                  + "doesn't exist in the storageMovementAttemptedItems list",
+                  storageMovementAttemptedResult.getTrackId());
             }
-            LOG.info("Blocks storage movement results for the tracking id : "
-                + storageMovementAttemptedResult.getTrackId()
-                + " is reported from co-ordinating datanode. "
-                + "The result status is SUCCESS.");
           }
-          iter.remove(); // remove from results as processed above
+          // Remove trackID from the attempted list, if any.
+          storageMovementAttemptedItems
+              .remove(storageMovementAttemptedResult.getTrackId());
         }
+        // Remove trackID from results as processed above.
+        resultsIter.remove();
       }
-
     }
   }
 

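For readers following the patch, the retry decision that the result monitor applies to each reported trackID can be condensed into a small stand-alone sketch. The class and method names below are hypothetical simplifications of the code above, not part of the patch:

final class RetryDecision {
  enum Status { SUCCESS, FAILURE }

  // A trackID is re-queued when its movement FAILED, or when it SUCCEEDED
  // but only some of its block locations had been scheduled earlier (a
  // partial movement). A null flag stands for a trackID that already timed
  // out and left the attempted-items map.
  static boolean shouldRetry(Status reportedStatus,
      Boolean allBlockLocsAttemptedToSatisfy) {
    if (reportedStatus == Status.FAILURE) {
      return true;
    }
    return allBlockLocsAttemptedToSatisfy != null
        && !allBlockLocsAttemptedToSatisfy;
  }
}
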
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 56a531f..26e0775 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -162,8 +162,15 @@ public class StoragePolicySatisfier implements Runnable {
       try {
         Long blockCollectionID = storageMovementNeeded.get();
         if (blockCollectionID != null) {
-          computeAndAssignStorageMismatchedBlocksToDNs(blockCollectionID);
-          this.storageMovementsMonitor.add(blockCollectionID);
+          BlockCollection blockCollection =
+              namesystem.getBlockCollection(blockCollectionID);
+          // Check blockCollectionId existence.
+          if (blockCollection != null) {
+            boolean allBlockLocsAttemptedToSatisfy =
+                computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
+            this.storageMovementsMonitor.add(blockCollectionID,
+                allBlockLocsAttemptedToSatisfy);
+          }
         }
         // TODO: We can think to make this as configurable later, how frequently
         // we want to check block movements.
@@ -192,20 +199,17 @@ public class StoragePolicySatisfier implements Runnable {
     }
   }
 
-  private void computeAndAssignStorageMismatchedBlocksToDNs(
-      long blockCollectionID) {
-    BlockCollection blockCollection =
-        namesystem.getBlockCollection(blockCollectionID);
-    if (blockCollection == null) {
-      return;
-    }
+  private boolean computeAndAssignStorageMismatchedBlocksToDNs(
+      BlockCollection blockCollection) {
     byte existingStoragePolicyID = blockCollection.getStoragePolicyID();
     BlockStoragePolicy existingStoragePolicy =
         blockManager.getStoragePolicy(existingStoragePolicyID);
     if (!blockCollection.getLastBlock().isComplete()) {
       // Postpone, currently file is under construction
       // So, should we add back? or leave it to user
-      return;
+      LOG.info("BlockCollectionID: {} file is under construction. So, postpone"
+          + " this to the next retry iteration", blockCollection.getId());
+      return true;
     }
 
     // First datanode will be chosen as the co-ordinator node for storage
@@ -213,61 +217,87 @@ public class StoragePolicySatisfier implements Runnable {
     DatanodeDescriptor coordinatorNode = null;
     BlockInfo[] blocks = blockCollection.getBlocks();
     List<BlockMovingInfo> blockMovingInfos = new ArrayList<BlockMovingInfo>();
+
+    // A true value represents that SPS is able to find matching target nodes
+    // to satisfy the storage type for all the block locations of the given
+    // blockCollection. A false value represents that the blockCollection needs
+    // retries to satisfy the storage policy for some of the block locations.
+    boolean foundMatchingTargetNodesForAllBlocks = true;
+
     for (int i = 0; i < blocks.length; i++) {
       BlockInfo blockInfo = blocks[i];
-      List<StorageType> expectedStorageTypes =
-          existingStoragePolicy.chooseStorageTypes(blockInfo.getReplication());
-      DatanodeStorageInfo[] storages = blockManager.getStorages(blockInfo);
-      StorageType[] storageTypes = new StorageType[storages.length];
-      for (int j = 0; j < storages.length; j++) {
-        DatanodeStorageInfo datanodeStorageInfo = storages[j];
-        StorageType storageType = datanodeStorageInfo.getStorageType();
-        storageTypes[j] = storageType;
-      }
-      List<StorageType> existing =
-          new LinkedList<StorageType>(Arrays.asList(storageTypes));
-      if (!DFSUtil.removeOverlapBetweenStorageTypes(expectedStorageTypes,
-          existing, true)) {
-        List<StorageTypeNodePair> sourceWithStorageMap =
-            new ArrayList<StorageTypeNodePair>();
-        List<DatanodeStorageInfo> existingBlockStorages =
-            new ArrayList<DatanodeStorageInfo>(Arrays.asList(storages));
-        for (StorageType existingType : existing) {
-          Iterator<DatanodeStorageInfo> iterator =
-              existingBlockStorages.iterator();
-          while (iterator.hasNext()) {
-            DatanodeStorageInfo datanodeStorageInfo = iterator.next();
-            StorageType storageType = datanodeStorageInfo.getStorageType();
-            if (storageType == existingType) {
-              iterator.remove();
-              sourceWithStorageMap.add(new StorageTypeNodePair(storageType,
-                  datanodeStorageInfo.getDatanodeDescriptor()));
-              break;
-            }
-          }
-        }
+      List<StorageType> expectedStorageTypes = existingStoragePolicy
+            .chooseStorageTypes(blockInfo.getReplication());
+      foundMatchingTargetNodesForAllBlocks |= computeBlockMovingInfos(
+          blockMovingInfos, blockInfo, expectedStorageTypes);
+    }
 
-        StorageTypeNodeMap locsForExpectedStorageTypes =
-            findTargetsForExpectedStorageTypes(expectedStorageTypes);
-
-        BlockMovingInfo blockMovingInfo =
-            findSourceAndTargetToMove(blockInfo, existing, sourceWithStorageMap,
-                expectedStorageTypes, locsForExpectedStorageTypes);
-        if (coordinatorNode == null) {
-          // For now, first datanode will be chosen as the co-ordinator. Later
-          // this can be optimized if needed.
-          coordinatorNode =
-              (DatanodeDescriptor) blockMovingInfo.getSources()[0];
+    assignBlockMovingInfosToCoordinatorDn(blockCollection.getId(),
+        blockMovingInfos, coordinatorNode);
+    return foundMatchingTargetNodesForAllBlocks;
+  }
+
+  /**
+   * Compute the list of block moving information corresponding to the given
+   * blockId. This will check whether each block location of the given block
+   * satisfies the expected storage policy. If a block location does not
+   * satisfy the policy, it finds a target node with the expected storage type
+   * to satisfy the storage policy.
+   *
+   * @param blockMovingInfos
+   *          - list of block source and target node pair
+   * @param blockInfo
+   *          - block details
+   * @param expectedStorageTypes
+   *          - list of expected storage type to satisfy the storage policy
+   * @return false if some of the block locations failed to find target node to
+   *         satisfy the storage policy, true otherwise
+   */
+  private boolean computeBlockMovingInfos(
+      List<BlockMovingInfo> blockMovingInfos, BlockInfo blockInfo,
+      List<StorageType> expectedStorageTypes) {
+    boolean foundMatchingTargetNodesForBlock = true;
+    DatanodeStorageInfo[] storages = blockManager.getStorages(blockInfo);
+    StorageType[] storageTypes = new StorageType[storages.length];
+    for (int j = 0; j < storages.length; j++) {
+      DatanodeStorageInfo datanodeStorageInfo = storages[j];
+      StorageType storageType = datanodeStorageInfo.getStorageType();
+      storageTypes[j] = storageType;
+    }
+    List<StorageType> existing =
+        new LinkedList<StorageType>(Arrays.asList(storageTypes));
+    if (!DFSUtil.removeOverlapBetweenStorageTypes(expectedStorageTypes,
+        existing, true)) {
+      List<StorageTypeNodePair> sourceWithStorageMap =
+          new ArrayList<StorageTypeNodePair>();
+      List<DatanodeStorageInfo> existingBlockStorages =
+          new ArrayList<DatanodeStorageInfo>(Arrays.asList(storages));
+      for (StorageType existingType : existing) {
+        Iterator<DatanodeStorageInfo> iterator =
+            existingBlockStorages.iterator();
+        while (iterator.hasNext()) {
+          DatanodeStorageInfo datanodeStorageInfo = iterator.next();
+          StorageType storageType = datanodeStorageInfo.getStorageType();
+          if (storageType == existingType) {
+            iterator.remove();
+            sourceWithStorageMap.add(new StorageTypeNodePair(storageType,
+                datanodeStorageInfo.getDatanodeDescriptor()));
+            break;
+          }
         }
-        blockMovingInfos.add(blockMovingInfo);
       }
-    }
 
-    addBlockMovingInfosToCoordinatorDn(blockCollectionID, blockMovingInfos,
-        coordinatorNode);
+      StorageTypeNodeMap locsForExpectedStorageTypes =
+          findTargetsForExpectedStorageTypes(expectedStorageTypes);
+
+      foundMatchingTargetNodesForBlock |= findSourceAndTargetToMove(
+          blockMovingInfos, blockInfo, existing, sourceWithStorageMap,
+          expectedStorageTypes, locsForExpectedStorageTypes);
+    }
+    return foundMatchingTargetNodesForBlock;
   }
 
-  private void addBlockMovingInfosToCoordinatorDn(long blockCollectionID,
+  private void assignBlockMovingInfosToCoordinatorDn(long blockCollectionID,
       List<BlockMovingInfo> blockMovingInfos,
       DatanodeDescriptor coordinatorNode) {
 
@@ -278,6 +308,11 @@ public class StoragePolicySatisfier implements Runnable {
       return;
     }
 
+    // For now, first datanode will be chosen as the co-ordinator. Later
+    // this can be optimized if needed.
+    coordinatorNode = (DatanodeDescriptor) blockMovingInfos.get(0)
+        .getSources()[0];
+
     boolean needBlockStorageMovement = false;
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
       // Check for atleast one block storage movement has been chosen
@@ -301,6 +336,8 @@ public class StoragePolicySatisfier implements Runnable {
    * Find the good target node for each source node for which block storages was
    * misplaced.
    *
+   * @param blockMovingInfos
+   *          - list of block source and target node pair
    * @param blockInfo
    *          - Block
    * @param existing
@@ -311,23 +348,49 @@ public class StoragePolicySatisfier implements Runnable {
    *          - Expecting storages to move
    * @param locsForExpectedStorageTypes
    *          - Available DNs for expected storage types
-   * @return list of block source and target node pair
+   * @return false if some of the block locations failed to find target node to
+   *         satisfy the storage policy
    */
-  private BlockMovingInfo findSourceAndTargetToMove(BlockInfo blockInfo,
+  private boolean findSourceAndTargetToMove(
+      List<BlockMovingInfo> blockMovingInfos, BlockInfo blockInfo,
       List<StorageType> existing,
       List<StorageTypeNodePair> sourceWithStorageList,
       List<StorageType> expected,
       StorageTypeNodeMap locsForExpectedStorageTypes) {
+    boolean foundMatchingTargetNodesForBlock = true;
     List<DatanodeInfo> sourceNodes = new ArrayList<>();
     List<StorageType> sourceStorageTypes = new ArrayList<>();
     List<DatanodeInfo> targetNodes = new ArrayList<>();
     List<StorageType> targetStorageTypes = new ArrayList<>();
     List<DatanodeDescriptor> chosenNodes = new ArrayList<>();
+
+    // Looping over all the source node locations and choose the target
+    // storage within same node if possible. This is done separately to
+    // avoid choosing a target which already has this block.
     for (int i = 0; i < sourceWithStorageList.size(); i++) {
       StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
       StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
           existingTypeNodePair.dn, expected);
+      if (chosenTarget != null) {
+        sourceNodes.add(existingTypeNodePair.dn);
+        sourceStorageTypes.add(existingTypeNodePair.storageType);
+        targetNodes.add(chosenTarget.dn);
+        targetStorageTypes.add(chosenTarget.storageType);
+        chosenNodes.add(chosenTarget.dn);
+        // TODO: We can increment scheduled block count for this node?
+      }
+    }
 
+    // Loop over all the source node locations and choose a remote target
+    // storage node if one was not found within the same node.
+    for (int i = 0; i < sourceWithStorageList.size(); i++) {
+      StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
+      StorageTypeNodePair chosenTarget = null;
+      // The target storage was already chosen within the same datanode, so
+      // just skip this source node.
+      if (sourceNodes.contains(existingTypeNodePair.dn)) {
+        continue;
+      }
       if (chosenTarget == null && blockManager.getDatanodeManager()
           .getNetworkTopology().isNodeGroupAware()) {
         chosenTarget = chooseTarget(blockInfo, existingTypeNodePair.dn,
@@ -359,18 +422,40 @@ public class StoragePolicySatisfier implements Runnable {
             "Failed to choose target datanode for the required"
                 + " storage types {}, block:{}, existing storage type:{}",
             expected, blockInfo, existingTypeNodePair.storageType);
-        sourceNodes.add(existingTypeNodePair.dn);
-        sourceStorageTypes.add(existingTypeNodePair.storageType);
-        // Imp: Not setting the target details, empty targets. Later, this is
-        // used as an indicator for retrying this block movement.
+        foundMatchingTargetNodesForBlock = false;
       }
     }
-    BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blockInfo,
+
+    blockMovingInfos.addAll(getBlockMovingInfos(blockInfo, sourceNodes,
+        sourceStorageTypes, targetNodes, targetStorageTypes));
+    return foundMatchingTargetNodesForBlock;
+  }
+
+  private List<BlockMovingInfo> getBlockMovingInfos(BlockInfo blockInfo,
+      List<DatanodeInfo> sourceNodes, List<StorageType> sourceStorageTypes,
+      List<DatanodeInfo> targetNodes, List<StorageType> targetStorageTypes) {
+    List<BlockMovingInfo> blkMovingInfos = new ArrayList<>();
+    // No source-target node pair exists.
+    if (sourceNodes.size() <= 0) {
+      return blkMovingInfos;
+    }
+    buildBlockMovingInfos(blockInfo, sourceNodes, sourceStorageTypes,
+        targetNodes, targetStorageTypes, blkMovingInfos);
+    return blkMovingInfos;
+  }
+
+  private void buildBlockMovingInfos(BlockInfo blockInfo,
+      List<DatanodeInfo> sourceNodes, List<StorageType> sourceStorageTypes,
+      List<DatanodeInfo> targetNodes, List<StorageType> targetStorageTypes,
+      List<BlockMovingInfo> blkMovingInfos) {
+    Block blk = new Block(blockInfo.getBlockId(), blockInfo.getNumBytes(),
+        blockInfo.getGenerationStamp());
+    BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blk,
         sourceNodes.toArray(new DatanodeInfo[sourceNodes.size()]),
         targetNodes.toArray(new DatanodeInfo[targetNodes.size()]),
         sourceStorageTypes.toArray(new StorageType[sourceStorageTypes.size()]),
         targetStorageTypes.toArray(new StorageType[targetStorageTypes.size()]));
-    return blkMovingInfo;
+    blkMovingInfos.add(blkMovingInfo);
   }
 
   /**

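As a rough illustration of the per-block check that computeBlockMovingInfos performs, namely whether the existing replica storage types already cover the expected ones, here is a stand-alone sketch using plain Java collections. The names are illustrative; this is not the actual DFSUtil helper used in the patch:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class StorageTypeOverlap {
  // Removes storage types that are already satisfied from both lists and
  // reports whether anything is left to move for this block.
  static boolean needsMovement(List<String> expected, List<String> existing) {
    for (Iterator<String> it = existing.iterator(); it.hasNext();) {
      if (expected.remove(it.next())) {
        it.remove();
      }
    }
    return !expected.isEmpty();
  }

  public static void main(String[] args) {
    List<String> expected =
        new ArrayList<>(List.of("ARCHIVE", "ARCHIVE", "ARCHIVE"));
    List<String> existing =
        new ArrayList<>(List.of("DISK", "DISK", "ARCHIVE"));
    System.out.println(needsMovement(expected, existing)); // true
    System.out.println(expected); // [ARCHIVE, ARCHIVE] still need targets
  }
}
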
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
index 8c70d99..6641134 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -33,13 +33,13 @@ public class TestBlockStorageMovementAttemptedItems {
 
   private BlockStorageMovementAttemptedItems bsmAttemptedItems = null;
   private BlockStorageMovementNeeded unsatisfiedStorageMovementFiles = null;
+  private final int selfRetryTimeout = 500;
 
   @Before
   public void setup() {
     unsatisfiedStorageMovementFiles = new BlockStorageMovementNeeded();
-    bsmAttemptedItems = new BlockStorageMovementAttemptedItems(100, 500,
-        unsatisfiedStorageMovementFiles);
-    bsmAttemptedItems.start();
+    bsmAttemptedItems = new BlockStorageMovementAttemptedItems(100,
+        selfRetryTimeout, unsatisfiedStorageMovementFiles);
   }
 
   @After
@@ -72,8 +72,9 @@ public class TestBlockStorageMovementAttemptedItems {
 
   @Test(timeout = 30000)
   public void testAddResultWithFailureResult() throws Exception {
+    bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item);
+    bsmAttemptedItems.add(item, true);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
@@ -82,8 +83,9 @@ public class TestBlockStorageMovementAttemptedItems {
 
   @Test(timeout = 30000)
   public void testAddResultWithSucessResult() throws Exception {
+    bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item);
+    bsmAttemptedItems.add(item, true);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
@@ -92,10 +94,93 @@ public class TestBlockStorageMovementAttemptedItems {
 
   @Test(timeout = 30000)
   public void testNoResultAdded() throws Exception {
+    bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item);
-    // After selfretry timeout, it should be added back for retry
-    assertTrue(checkItemMovedForRetry(item, 600));
+    bsmAttemptedItems.add(item, true);
+    // After self retry timeout, it should be added back for retry
+    assertTrue("Failed to add to the retry list",
+        checkItemMovedForRetry(item, 600));
+    assertEquals("Failed to remove from the attempted list", 0,
+        bsmAttemptedItems.getAttemptedItemsCount());
   }
 
+  /**
+   * Partial block movement with BlocksStorageMovementResult#SUCCESS. Here,
+   * first occurrence is #blockStorageMovementResultCheck() and then
+   * #blocksStorageMovementUnReportedItemsCheck().
+   */
+  @Test(timeout = 30000)
+  public void testPartialBlockMovementShouldBeRetried1() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
+
+    // start block movement result monitor thread
+    bsmAttemptedItems.start();
+    assertTrue("Failed to add to the retry list",
+        checkItemMovedForRetry(item, 5000));
+    assertEquals("Failed to remove from the attempted list", 0,
+        bsmAttemptedItems.getAttemptedItemsCount());
+  }
+
+  /**
+   * Partial block movement with BlocksStorageMovementResult#SUCCESS. Here,
+   * first occurrence is #blocksStorageMovementUnReportedItemsCheck() and then
+   * #blockStorageMovementResultCheck().
+   */
+  @Test(timeout = 30000)
+  public void testPartialBlockMovementShouldBeRetried2() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
+
+    Thread.sleep(selfRetryTimeout * 2); // Waiting to get timed out
+
+    bsmAttemptedItems.blocksStorageMovementUnReportedItemsCheck();
+    bsmAttemptedItems.blockStorageMovementResultCheck();
+
+    assertTrue("Failed to add to the retry list",
+        checkItemMovedForRetry(item, 5000));
+    assertEquals("Failed to remove from the attempted list", 0,
+        bsmAttemptedItems.getAttemptedItemsCount());
+  }
+
+  /**
+   * Partial block movement with only a BlocksStorageMovementResult#FAILURE
+   * result while the storageMovementAttemptedItems list is empty.
+   */
+  @Test(timeout = 30000)
+  public void testPartialBlockMovementShouldBeRetried3() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
+    bsmAttemptedItems.blockStorageMovementResultCheck();
+    assertTrue("Failed to add to the retry list",
+        checkItemMovedForRetry(item, 5000));
+    assertEquals("Failed to remove from the attempted list", 0,
+        bsmAttemptedItems.getAttemptedItemsCount());
+  }
+
+  /**
+   * Partial block movement with a BlocksStorageMovementResult#FAILURE result
+   * and a non-empty storageMovementAttemptedItems list.
+   */
+  @Test(timeout = 30000)
+  public void testPartialBlockMovementShouldBeRetried4() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
+    bsmAttemptedItems.blockStorageMovementResultCheck();
+    assertTrue("Failed to add to the retry list",
+        checkItemMovedForRetry(item, 5000));
+    assertEquals("Failed to remove from the attempted list", 0,
+        bsmAttemptedItems.getAttemptedItemsCount());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a09c7d43/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 179b66b..718dbcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -527,6 +527,59 @@ public class TestStoragePolicySatisfier {
     waitExpectedStorageType(file1, StorageType.DISK, 2, 30000);
   }
 
+  /**
+   * Tests to verify that for the given path, only a few of the blocks or
+   * block src locations (src nodes) under the given path will be scheduled
+   * for block movement.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => two blocks and default storage policy(HOT).
+   * blk_1[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)].
+   *
+   * Now, set storage policy to COLD.
+   * Only two DNs are available with the expected storage type ARCHIVE,
+   * say A and E.
+   *
+   * SPS will schedule block movement to the coordinator node with the details,
+   * blk_1[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)],
+   * blk_2[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)].
+   */
+  @Test(timeout = 300000)
+  public void testWhenOnlyFewSourceNodesHaveMatchingTargetNodes()
+      throws Exception {
+    try {
+      int numOfDns = 5;
+      config.setLong("dfs.block.size", 1024);
+      allDiskTypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.ARCHIVE}};
+      hdfsCluster = startCluster(config, allDiskTypes, numOfDns,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file, (short) 5);
+
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      // Wait till StoragePolicySatisfier identifies the blocks to move to
+      // the ARCHIVE area.
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 2, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 3, 30000);
+
+      waitForBlocksMovementResult(1, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
@@ -561,7 +614,7 @@ public class TestStoragePolicySatisfier {
       DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
       favoredNodesCount--;
       if (favoredNodesCount <= 0) {
-        break;// marked favoredNodesCount number of pinned block location
+        break; // marked favoredNodesCount number of pinned block location
       }
     }
     return file1;
@@ -600,8 +653,14 @@ public class TestStoragePolicySatisfier {
   }
 
   private void writeContent(final String fileName) throws IOException {
+    writeContent(fileName, (short) 3);
+  }
+
+  private void writeContent(final String fileName, short replicatonFactor)
+      throws IOException {
     // write to DISK
-    final FSDataOutputStream out = dfs.create(new Path(fileName));
+    final FSDataOutputStream out = dfs.create(new Path(fileName),
+        replicatonFactor);
     for (int i = 0; i < 1000; i++) {
       out.writeChars("t");
     }



[30/50] [abbrv] hadoop git commit: HDFS-11309. [SPS]: chooseTargetTypeInSameNode should pass accurate block size to chooseStorage4Block while choosing target. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11309. [SPS]: chooseTargetTypeInSameNode should pass accurate block size to chooseStorage4Block while choosing target. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a27ab4ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a27ab4ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a27ab4ad

Branch: refs/heads/HDFS-10285
Commit: a27ab4ad941b9cd422d35ad0080f3d32efdcaead
Parents: 8b775bd
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Fri Jan 20 21:37:51 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:10 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/StoragePolicySatisfier.java |  31 +++---
 .../namenode/TestStoragePolicySatisfier.java    | 108 ++++++++++++++++---
 2 files changed, 110 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a27ab4ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 3b19833..1c48910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -403,24 +403,25 @@ public class StoragePolicySatisfier implements Runnable {
     List<StorageType> sourceStorageTypes = new ArrayList<>();
     List<DatanodeInfo> targetNodes = new ArrayList<>();
     List<StorageType> targetStorageTypes = new ArrayList<>();
-    List<DatanodeDescriptor> chosenNodes = new ArrayList<>();
+    List<DatanodeDescriptor> excludeNodes = new ArrayList<>();
 
     // Looping over all the source node locations and choose the target
     // storage within same node if possible. This is done separately to
     // avoid choosing a target which already has this block.
     for (int i = 0; i < sourceWithStorageList.size(); i++) {
       StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-      StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+      StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
           existingTypeNodePair.dn, expected);
       if (chosenTarget != null) {
         sourceNodes.add(existingTypeNodePair.dn);
         sourceStorageTypes.add(existingTypeNodePair.storageType);
         targetNodes.add(chosenTarget.dn);
         targetStorageTypes.add(chosenTarget.storageType);
-        chosenNodes.add(chosenTarget.dn);
         expected.remove(chosenTarget.storageType);
         // TODO: We can increment scheduled block count for this node?
       }
+      // To avoid choosing this excludeNodes as targets later
+      excludeNodes.add(existingTypeNodePair.dn);
     }
 
     // Looping over all the source node locations. Choose a remote target
@@ -437,28 +438,28 @@ public class StoragePolicySatisfier implements Runnable {
           .getNetworkTopology().isNodeGroupAware()) {
         chosenTarget = chooseTarget(blockInfo, existingTypeNodePair.dn,
             expected, Matcher.SAME_NODE_GROUP, locsForExpectedStorageTypes,
-            chosenNodes);
+            excludeNodes);
       }
 
       // Then, match nodes on the same rack
       if (chosenTarget == null) {
         chosenTarget =
             chooseTarget(blockInfo, existingTypeNodePair.dn, expected,
-                Matcher.SAME_RACK, locsForExpectedStorageTypes, chosenNodes);
+                Matcher.SAME_RACK, locsForExpectedStorageTypes, excludeNodes);
       }
 
       if (chosenTarget == null) {
         chosenTarget =
             chooseTarget(blockInfo, existingTypeNodePair.dn, expected,
-                Matcher.ANY_OTHER, locsForExpectedStorageTypes, chosenNodes);
+                Matcher.ANY_OTHER, locsForExpectedStorageTypes, excludeNodes);
       }
       if (null != chosenTarget) {
         sourceNodes.add(existingTypeNodePair.dn);
         sourceStorageTypes.add(existingTypeNodePair.storageType);
         targetNodes.add(chosenTarget.dn);
         targetStorageTypes.add(chosenTarget.storageType);
-        chosenNodes.add(chosenTarget.dn);
         expected.remove(chosenTarget.storageType);
+        excludeNodes.add(chosenTarget.dn);
         // TODO: We can increment scheduled block count for this node?
       } else {
         LOG.warn(
@@ -554,14 +555,18 @@ public class StoragePolicySatisfier implements Runnable {
   /**
    * Choose the target storage within same datanode if possible.
    *
-   * @param source source datanode
-   * @param targetTypes list of target storage types
+   * @param block
+   *          - block info
+   * @param source
+   *          - source datanode
+   * @param targetTypes
+   *          - list of target storage types
    */
-  private StorageTypeNodePair chooseTargetTypeInSameNode(
+  private StorageTypeNodePair chooseTargetTypeInSameNode(Block block,
       DatanodeDescriptor source, List<StorageType> targetTypes) {
     for (StorageType t : targetTypes) {
       DatanodeStorageInfo chooseStorage4Block =
-          source.chooseStorage4Block(t, 0);
+          source.chooseStorage4Block(t, block.getNumBytes());
       if (chooseStorage4Block != null) {
         return new StorageTypeNodePair(t, source);
       }
@@ -572,7 +577,7 @@ public class StoragePolicySatisfier implements Runnable {
   private StorageTypeNodePair chooseTarget(Block block,
       DatanodeDescriptor source, List<StorageType> targetTypes, Matcher matcher,
       StorageTypeNodeMap locsForExpectedStorageTypes,
-      List<DatanodeDescriptor> chosenNodes) {
+      List<DatanodeDescriptor> excludeNodes) {
     for (StorageType t : targetTypes) {
       List<DatanodeDescriptor> nodesWithStorages =
           locsForExpectedStorageTypes.getNodesWithStorages(t);
@@ -581,7 +586,7 @@ public class StoragePolicySatisfier implements Runnable {
       }
       Collections.shuffle(nodesWithStorages);
       for (DatanodeDescriptor target : nodesWithStorages) {
-        if (!chosenNodes.contains(target) && matcher.match(
+        if (!excludeNodes.contains(target) && matcher.match(
             blockManager.getDatanodeManager().getNetworkTopology(), source,
             target)) {
           if (null != target.chooseStorage4Block(t, block.getNumBytes())) {

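The point of passing the real block size above is that a same-node target storage should only be chosen if it can actually hold the block. A toy version of that check, with hypothetical types rather than the DatanodeDescriptor/DatanodeStorageInfo API:

import java.util.List;
import java.util.Map;

final class SameNodeTargetChooser {
  // Pick the first expected storage type on the same node with enough
  // remaining space for the block being moved; null means fall back to a
  // remote target. Passing a size of 0, as the code did before this patch,
  // could accept a storage that cannot actually hold the block.
  static String chooseTargetTypeInSameNode(long blockNumBytes,
      Map<String, Long> remainingBytesPerStorageType,
      List<String> expectedTypes) {
    for (String t : expectedTypes) {
      Long remaining = remainingBytesPerStorageType.get(t);
      if (remaining != null && remaining >= blockNumBytes) {
        return t;
      }
    }
    return null;
  }
}
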
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a27ab4ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 1c53894..de73e8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.junit.Assert.assertNull;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -57,6 +60,8 @@ import com.google.common.base.Supplier;
  * moved and finding its suggested target locations to move.
  */
 public class TestStoragePolicySatisfier {
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
   private static final Logger LOG =
       LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
   private final Configuration config = new HdfsConfiguration();
@@ -93,7 +98,7 @@ public class TestStoragePolicySatisfier {
     try {
       createCluster();
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), COLD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -151,7 +156,7 @@ public class TestStoragePolicySatisfier {
     try {
       createCluster();
       // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), ONE_SSD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -184,7 +189,7 @@ public class TestStoragePolicySatisfier {
     try {
       createCluster();
       // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), ONE_SSD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -232,7 +237,7 @@ public class TestStoragePolicySatisfier {
       List<Long> blockCollectionIds = new ArrayList<>();
       // Change policy to ONE_SSD
       for (String fileName : files) {
-        dfs.setStoragePolicy(new Path(fileName), "ONE_SSD");
+        dfs.setStoragePolicy(new Path(fileName), ONE_SSD);
         INode inode = namesystem.getFSDirectory().getINode(fileName);
         blockCollectionIds.add(inode.getId());
       }
@@ -274,12 +279,12 @@ public class TestStoragePolicySatisfier {
       HdfsAdmin hdfsAdmin =
           new HdfsAdmin(FileSystem.getDefaultUri(config), config);
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), COLD);
 
       StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.ARCHIVE}};
       startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
 
@@ -314,7 +319,7 @@ public class TestStoragePolicySatisfier {
       writeContent(subFile2);
 
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(subDir), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(subDir), ONE_SSD);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
@@ -418,7 +423,7 @@ public class TestStoragePolicySatisfier {
     try {
       createCluster();
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), COLD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -463,7 +468,7 @@ public class TestStoragePolicySatisfier {
     try {
       createCluster();
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), COLD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -533,7 +538,7 @@ public class TestStoragePolicySatisfier {
     final String file1 = createFileAndSimulateFavoredNodes(2);
 
     // Change policy to COLD
-    dfs.setStoragePolicy(new Path(file1), "COLD");
+    dfs.setStoragePolicy(new Path(file1), COLD);
     FSNamesystem namesystem = hdfsCluster.getNamesystem();
     INode inode = namesystem.getFSDirectory().getINode(file1);
 
@@ -594,7 +599,7 @@ public class TestStoragePolicySatisfier {
       writeContent(file, (short) 5);
 
       // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), COLD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -633,7 +638,7 @@ public class TestStoragePolicySatisfier {
       writeContent(file);
 
       // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), ONE_SSD);
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -688,6 +693,77 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests that movements should not be assigned when there is no space in
+   * the target DN.
+   */
+  @Test(timeout = 300000)
+  public void testChooseInSameDatanodeWithONESSDShouldNotChooseIfNoSpace()
+      throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.DISK}};
+    config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
+    long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
+          storagesPerDatanode, dnCapacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(file), ONE_SSD);
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+      Path filePath = new Path("/testChooseInSameDatanode");
+      final FSDataOutputStream out =
+          dfs.create(filePath, false, 100, (short) 1, 2 * DEFAULT_BLOCK_SIZE);
+      try {
+        dfs.setStoragePolicy(filePath, ONE_SSD);
+        // Try to fill up SSD part by writing content
+        long remaining = dfs.getStatus().getRemaining() / (3 * 2);
+        for (int i = 0; i < remaining; i++) {
+          out.write(i);
+        }
+      } finally {
+        out.close();
+      }
+      hdfsCluster.triggerHeartbeats();
+      ArrayList<DataNode> dataNodes = hdfsCluster.getDataNodes();
+      // Temporarily disable heart beats, so that we can assert whether any
+      // items are scheduled for DNs even though the DNs do not have space to
+      // write. Disabling heart beats keeps the scheduled items on the
+      // DatanodeDescriptor itself.
+      for (DataNode dataNode : dataNodes) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, true);
+      }
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+
+      // Wait for items to be processed
+      waitForAttemptedItems(1, 30000);
+
+      // Make sure no items assigned for movements
+      Set<DatanodeDescriptor> dns = hdfsCluster.getNamesystem()
+          .getBlockManager().getDatanodeManager().getDatanodes();
+      for (DatanodeDescriptor dd : dns) {
+        assertNull(dd.getBlocksToMoveStorages());
+      }
+
+      // Enable heart beats now
+      for (DataNode dataNode : dataNodes) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, false);
+      }
+      hdfsCluster.triggerHeartbeats();
+
+      DFSTestUtil.waitExpectedStorageType(file, StorageType.DISK, 3, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(file, StorageType.SSD, 0, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
@@ -769,8 +845,8 @@ public class TestStoragePolicySatisfier {
     // write to DISK
     final FSDataOutputStream out = dfs.create(new Path(fileName),
         replicatonFactor);
-    for (int i = 0; i < 1000; i++) {
-      out.writeChars("t");
+    for (int i = 0; i < 1024; i++) {
+      out.write(i);
     }
     out.close();
   }



[40/50] [abbrv] hadoop git commit: HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caea9fe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caea9fe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caea9fe6

Branch: refs/heads/HDFS-10285
Commit: caea9fe6f475d1d02dbff8d4f3623cd01431c02a
Parents: f736094
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Fri Jun 9 14:03:13 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:00 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/StoragePolicySatisfier.java | 23 ++++++----
 .../namenode/TestStoragePolicySatisfier.java    | 44 ++++++++++++++++++++
 2 files changed, 58 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caea9fe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 9e2a4a0..1b2afa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -501,15 +501,20 @@ public class StoragePolicySatisfier implements Runnable {
     // avoid choosing a target which already has this block.
     for (int i = 0; i < sourceWithStorageList.size(); i++) {
       StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-      StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
-          existingTypeNodePair.dn, expected);
-      if (chosenTarget != null) {
-        sourceNodes.add(existingTypeNodePair.dn);
-        sourceStorageTypes.add(existingTypeNodePair.storageType);
-        targetNodes.add(chosenTarget.dn);
-        targetStorageTypes.add(chosenTarget.storageType);
-        expected.remove(chosenTarget.storageType);
-        // TODO: We can increment scheduled block count for this node?
+
+      // Check whether the block replica is already placed in the expected
+      // storage type in this source datanode.
+      if (!expected.contains(existingTypeNodePair.storageType)) {
+        StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+            blockInfo, existingTypeNodePair.dn, expected);
+        if (chosenTarget != null) {
+          sourceNodes.add(existingTypeNodePair.dn);
+          sourceStorageTypes.add(existingTypeNodePair.storageType);
+          targetNodes.add(chosenTarget.dn);
+          targetStorageTypes.add(chosenTarget.storageType);
+          expected.remove(chosenTarget.storageType);
+          // TODO: We can increment scheduled block count for this node?
+        }
       }
       // To avoid choosing this excludeNodes as targets later
       excludeNodes.add(existingTypeNodePair.dn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caea9fe6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 8e08a1e..f1a4169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -764,6 +764,50 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
+   * If replica with expected storage type already exist in source DN then that
+   * DN should be skipped.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
+      throws Exception {
+    StorageType[][] diskTypes = new StorageType[][] {
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE}};
+
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      // 1. Write two replica on disk
+      DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE,
+          (short) 2, 0);
+      // 2. Change policy to COLD, so third replica will be written to ARCHIVE.
+      dfs.setStoragePolicy(new Path(file), "COLD");
+
+      // 3.Change replication factor to 3.
+      dfs.setReplication(new Path(file), (short) 3);
+
+      DFSTestUtil
+          .waitExpectedStorageType(file, StorageType.DISK, 2, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+          dfs);
+
+      // 4. Change policy to HOT, so we can move the all block to DISK.
+      dfs.setStoragePolicy(new Path(file), "HOT");
+
+      // 4. Satisfy the policy.
+      dfs.satisfyStoragePolicy(new Path(file));
+
+      // 5. Block should move successfully .
+      DFSTestUtil
+          .waitExpectedStorageType(file, StorageType.DISK, 3, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
    * Tests that movements should not be assigned when there is no space in
    * target DN.
    */




[45/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e6bc735
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e6bc735
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e6bc735

Branch: refs/heads/HDFS-10285
Commit: 3e6bc735392660d7163b7ad984a71eb31c31b771
Parents: 7367d54
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:22 2017 +0530

----------------------------------------------------------------------
 .../src/site/markdown/ArchivalStorage.md        | 51 ++++++++++++++++++--
 1 file changed, 48 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6bc735/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index f1895fc..668bb20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -95,8 +95,44 @@ The effective storage policy can be retrieved by the "[`storagepolicies -getStor
 
     The default storage type of a datanode storage location will be DISK if it does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
----------------------------------
+Storage Policy Based Data Movement
+----------------------------------
+
+Setting a new storage policy on an already existing file/directory will change the policy in the Namespace, but it will not move the blocks physically across storage media.
+The following two options allow users to move the blocks based on the newly set policy. So, once a user changes/sets a new policy on a file/directory, the user should also perform one of the following options to achieve the desired data movement. Note that the two options cannot be run simultaneously.
+
+### <u>S</u>torage <u>P</u>olicy <u>S</u>atisfier (SPS)
+
+When a user changes the storage policy on a file/directory, the user can call the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the newly set policy.
+The SPS daemon thread runs along with the namenode and periodically scans for storage mismatches between the newly set policy and the actual placement of the physical blocks. It only tracks the files/directories for which a user has invoked satisfyStoragePolicy. If SPS identifies blocks that need to be moved for a file, it schedules block movement tasks to the datanodes. A Coordinator DataNode (C-DN) tracks all block movements associated with a file and notifies the namenode about movement success/failure. If any movements fail, the SPS re-attempts them by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
+* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a directory, SPS considers only the files immediately under that directory. Sub-directories won't be considered for satisfying the policy. It is the user's responsibility to call this API on sub-directories recursively in order to cover all files under the sub-tree.
+
+* HdfsAdmin API :
+        `public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|:---- |:---- |
+| `path` | A path which requires blocks storage movement. |
+
+#### Configurations:
+
+*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate SPS. Setting it to true activates SPS;
+   setting it to false deactivates it.
+
+*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to re-check the processed block storage movement
+   command results from the Co-ordinator Datanode.
+
+*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout to retry if no block movement results are reported
+   by the Co-ordinator Datanode within this configured period.
+
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to Balancer. It periodically scans the files in HDFS to check if the block placement satisfies the storage policy. For the blocks violating the storage policy, it moves the replicas to a different storage type in order to fulfill the storage policy requirement. Note that it always tries to move block replicas within the same node whenever possible. If that is not possible (e.g. when a node doesn’t have the target storage type) then it will copy the block replicas to another node over the network.
 
@@ -113,6 +149,10 @@ A new data migration tool is added for archiving data. The tool is similar to Ba
 
 Note that, when both -p and -f options are omitted, the default path is the root directory.
 
+#### Administrator notes:
+
+`StoragePolicySatisfier` and the `Mover` tool cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be deactivated while starting. In that case, the administrator should make sure the Mover execution has finished and then activate SPS again. Similarly, when SPS is already activated, Mover cannot be run. If the administrator wants to run the Mover tool explicitly, then he/she should make sure to deactivate SPS first and then run Mover. Please see the commands section to learn how to activate or deactivate SPS dynamically.
+
 Storage Policy Commands
 -----------------------
 
@@ -171,7 +211,8 @@ Get the storage policy of a file or a directory.
 
 ### Satisfy Storage Policy
 
-Schedule blocks to move based on file/directory policy. This command applicable only to the given path and its immediate children. Sub-directories won't be considered for satisfying the policy.
+Schedule blocks to move based on the file's/directory's current storage policy.
+Note: For a directory, only the immediate files under that directory are considered; sub-directories are not considered recursively.
 
 * Command:
 
@@ -191,4 +232,8 @@ Check the running status of Storage Policy Satisfier in namenode. If it is runni
 
         hdfs storagepolicies -isSPSRunning
 
+### Activate or Deactivate SPS without restarting Namenode
+If the administrator wants to activate or deactivate the SPS feature while the Namenode is running, he/she first needs to update the desired value (true or false) for the configuration item `dfs.storage.policy.satisfier.activate` in the configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
+
+        hdfs dfsadmin -reconfig namenode <host:ipc_port> start
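
For reference, a minimal client-side sketch of the API described above (an editorial illustration, not part of the commit; the namenode URI and paths are placeholders, and it assumes dfs.storage.policy.satisfier.activate is set to true in hdfs-site.xml):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();   // picks up hdfs-site.xml
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);

    Path dir = new Path("/archive/data");
    // Changing the policy only updates the namespace ...
    admin.setStoragePolicy(dir, "COLD");
    // ... calling SPS then physically moves the blocks of the immediate
    // files under this directory to match the new policy.
    admin.satisfyStoragePolicy(dir);
  }
}

If dfs.storage.policy.satisfier.activate is later toggled in hdfs-site.xml, the "hdfs dfsadmin -reconfig namenode <host:ipc_port> start" command shown above applies the change without restarting the Namenode.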
 




[17/50] [abbrv] hadoop git commit: HDFS-11029. [SPS]:Provide retry mechanism for the blocks which were failed while moving its storage at DNs. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11029. [SPS]:Provide retry mechanism for the blocks which were failed while moving its storage at DNs. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/054d0922
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/054d0922
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/054d0922

Branch: refs/heads/HDFS-10285
Commit: 054d09225e0133f7f9f418330008ed75f26552bc
Parents: 7130518
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Thu Nov 10 10:09:45 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:24 2017 +0530

----------------------------------------------------------------------
 .../BlockStorageMovementAttemptedItems.java     | 221 +++++++++++++++++++
 .../server/namenode/StoragePolicySatisfier.java |  24 +-
 .../TestBlockStorageMovementAttemptedItems.java | 101 +++++++++
 .../namenode/TestStoragePolicySatisfier.java    |   8 +-
 4 files changed, 343 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/054d0922/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
new file mode 100644
index 0000000..580d0d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.util.Time.monotonicNow;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A monitor class for checking whether block storage movements have finished.
+ * If the block storage movement results from a datanode indicate success, the
+ * entries are simply removed from tracking. If a failure is reported, the item
+ * is added back to the needed block storage movements list. If no DN reports
+ * on a movement for a long time, such items are retried automatically after a
+ * timeout. The default timeout is 30 minutes.
+ */
+public class BlockStorageMovementAttemptedItems {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class);
+  // A map holds the items which are already taken for blocks movements
+  // processing and sent to DNs.
+  private final Map<Long, Long> storageMovementAttemptedItems;
+  private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
+  private volatile boolean spsRunning = true;
+  private Daemon timerThread = null;
+  //
+  // It might take anywhere between 30 to 60 minutes before
+  // a request is timed out.
+  //
+  private long selfRetryTimeout = 30 * 60 * 1000;
+
+  //
+  // It might take anywhere between 5 to 10 minutes before
+  // a request is timed out.
+  //
+  private long checkTimeout = 5 * 60 * 1000; // minimum value
+  private BlockStorageMovementNeeded blockStorageMovementNeeded;
+
+  public BlockStorageMovementAttemptedItems(long timeoutPeriod,
+      long selfRetryTimeout,
+      BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) {
+    if (timeoutPeriod > 0) {
+      this.checkTimeout = Math.min(checkTimeout, timeoutPeriod);
+    }
+
+    this.selfRetryTimeout = selfRetryTimeout;
+    this.blockStorageMovementNeeded = unsatisfiedStorageMovementFiles;
+    storageMovementAttemptedItems = new HashMap<>();
+    storageMovementAttemptedResults = new ArrayList<>();
+  }
+
+  /**
+   * Add item to block storage movement attempted items map which holds the
+   * tracking/blockCollection id versus time stamp.
+   *
+   * @param blockCollectionID
+   *          - tracking id / block collection id
+   */
+  public void add(Long blockCollectionID) {
+    synchronized (storageMovementAttemptedItems) {
+      storageMovementAttemptedItems.put(blockCollectionID, monotonicNow());
+    }
+  }
+
+  /**
+   * Add the trackIDBlocksStorageMovementResults to
+   * storageMovementAttemptedResults.
+   *
+   * @param blksMovementResults
+   */
+  public void addResults(BlocksStorageMovementResult[] blksMovementResults) {
+    if (blksMovementResults.length == 0) {
+      return;
+    }
+    synchronized (storageMovementAttemptedResults) {
+      storageMovementAttemptedResults
+          .addAll(Arrays.asList(blksMovementResults));
+    }
+  }
+
+  /**
+   * Starts the monitor thread.
+   */
+  void start() {
+    timerThread = new Daemon(new BlocksStorageMovementAttemptResultMonitor());
+    timerThread.setName("BlocksStorageMovementAttemptResultMonitor");
+    timerThread.start();
+  }
+
+  /**
+   * Stops the monitor thread.
+   */
+  public void stop() {
+    spsRunning = false;
+  }
+
+  /**
+   * A monitor class that periodically checks block storage movement results and
+   * long-waiting items.
+   */
+  private class BlocksStorageMovementAttemptResultMonitor implements Runnable {
+    @Override
+    public void run() {
+      while (spsRunning) {
+        try {
+          blockStorageMovementResultCheck();
+          blocksStorageMovementUnReportedItemsCheck();
+          Thread.sleep(checkTimeout);
+        } catch (InterruptedException ie) {
+          LOG.debug("BlocksStorageMovementAttemptResultMonitor thread "
+              + "is interrupted.", ie);
+        }
+      }
+    }
+
+    private void blocksStorageMovementUnReportedItemsCheck() {
+      synchronized (storageMovementAttemptedItems) {
+        Iterator<Entry<Long, Long>> iter =
+            storageMovementAttemptedItems.entrySet().iterator();
+        long now = monotonicNow();
+        while (iter.hasNext()) {
+          Entry<Long, Long> entry = iter.next();
+          if (now > entry.getValue() + selfRetryTimeout) {
+            Long blockCollectionID = entry.getKey();
+            synchronized (storageMovementAttemptedResults) {
+              boolean exist = isExistInResult(blockCollectionID);
+              if (!exist) {
+                blockStorageMovementNeeded.add(blockCollectionID);
+              } else {
+                LOG.info("Blocks storage movement results for the"
+                    + " tracking id : " + blockCollectionID
+                    + " is reported from one of the co-ordinating datanode."
+                    + " So, the result will be processed soon.");
+              }
+              iter.remove();
+            }
+          }
+        }
+
+      }
+    }
+
+    private boolean isExistInResult(Long blockCollectionID) {
+      Iterator<BlocksStorageMovementResult> iter =
+          storageMovementAttemptedResults.iterator();
+      while (iter.hasNext()) {
+        BlocksStorageMovementResult storageMovementAttemptedResult =
+            iter.next();
+        if (storageMovementAttemptedResult.getTrackId() == blockCollectionID) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    private void blockStorageMovementResultCheck() {
+      synchronized (storageMovementAttemptedResults) {
+        Iterator<BlocksStorageMovementResult> iter =
+            storageMovementAttemptedResults.iterator();
+        while (iter.hasNext()) {
+          BlocksStorageMovementResult storageMovementAttemptedResult =
+              iter.next();
+          if (storageMovementAttemptedResult
+              .getStatus() == BlocksStorageMovementResult.Status.FAILURE) {
+            blockStorageMovementNeeded
+                .add(storageMovementAttemptedResult.getTrackId());
+            LOG.warn("Blocks storage movement results for the tracking id : "
+                + storageMovementAttemptedResult.getTrackId()
+                + " is reported from co-ordinating datanode, but result"
+                + " status is FAILURE. So, added for retry");
+          } else {
+            synchronized (storageMovementAttemptedItems) {
+              storageMovementAttemptedItems
+                  .remove(storageMovementAttemptedResult.getTrackId());
+            }
+            LOG.info("Blocks storage movement results for the tracking id : "
+                + storageMovementAttemptedResult.getTrackId()
+                + " is reported from co-ordinating datanode. "
+                + "The result status is SUCCESS.");
+          }
+          iter.remove(); // remove from results as processed above
+        }
+      }
+
+    }
+  }
+
+  @VisibleForTesting
+  public int resultsCount() {
+    return storageMovementAttemptedResults.size();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/054d0922/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index fbe686a..6fa9302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -69,6 +69,7 @@ public class StoragePolicySatisfier implements Runnable {
   private final Namesystem namesystem;
   private final BlockManager blockManager;
   private final BlockStorageMovementNeeded storageMovementNeeded;
+  private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
 
   public StoragePolicySatisfier(final Namesystem namesystem,
       final BlockStorageMovementNeeded storageMovementNeeded,
@@ -76,15 +77,22 @@ public class StoragePolicySatisfier implements Runnable {
     this.namesystem = namesystem;
     this.storageMovementNeeded = storageMovementNeeded;
     this.blockManager = blkManager;
+    // TODO: below selfRetryTimeout and checkTimeout can be configurable later
+    // Now, the default values of selfRetryTimeout and checkTimeout are 30mins
+    // and 5mins respectively
+    this.storageMovementsMonitor = new BlockStorageMovementAttemptedItems(
+        5 * 60 * 1000, 30 * 60 * 1000, storageMovementNeeded);
   }
 
   /**
-   * Start storage policy satisfier demon thread.
+   * Start storage policy satisfier daemon thread. Also start the block storage
+   * movements monitor to retry the attempts if needed.
    */
   public void start() {
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
+    this.storageMovementsMonitor.start();
   }
 
   /**
@@ -99,6 +107,7 @@ public class StoragePolicySatisfier implements Runnable {
       storagePolicySatisfierThread.join(3000);
     } catch (InterruptedException ie) {
     }
+    this.storageMovementsMonitor.stop();
   }
 
   @Override
@@ -108,6 +117,7 @@ public class StoragePolicySatisfier implements Runnable {
         Long blockCollectionID = storageMovementNeeded.get();
         if (blockCollectionID != null) {
           computeAndAssignStorageMismatchedBlocksToDNs(blockCollectionID);
+          this.storageMovementsMonitor.add(blockCollectionID);
         }
         // TODO: We can think to make this as configurable later, how frequently
         // we want to check block movements.
@@ -398,11 +408,6 @@ public class StoragePolicySatisfier implements Runnable {
     }
   }
 
-  // TODO: Temporarily keeping the results for assertion. This has to be
-  // revisited as part of HDFS-11029.
-  @VisibleForTesting
-  List<BlocksStorageMovementResult> results = new ArrayList<>();
-
   /**
    * Receives the movement results of collection of blocks associated to a
    * trackId.
@@ -415,6 +420,11 @@ public class StoragePolicySatisfier implements Runnable {
     if (blksMovementResults.length <= 0) {
       return;
     }
-    results.addAll(Arrays.asList(blksMovementResults));
+    storageMovementsMonitor.addResults(blksMovementResults);
+  }
+
+  @VisibleForTesting
+  BlockStorageMovementAttemptedItems getAttemptedItemsMonitor() {
+    return storageMovementsMonitor;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/054d0922/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
new file mode 100644
index 0000000..8c70d99
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests that block storage movement attempt failures reported from DNs are
+ * processed correctly.
+ */
+public class TestBlockStorageMovementAttemptedItems {
+
+  private BlockStorageMovementAttemptedItems bsmAttemptedItems = null;
+  private BlockStorageMovementNeeded unsatisfiedStorageMovementFiles = null;
+
+  @Before
+  public void setup() {
+    unsatisfiedStorageMovementFiles = new BlockStorageMovementNeeded();
+    bsmAttemptedItems = new BlockStorageMovementAttemptedItems(100, 500,
+        unsatisfiedStorageMovementFiles);
+    bsmAttemptedItems.start();
+  }
+
+  @After
+  public void teardown() {
+    if (bsmAttemptedItems != null) {
+      bsmAttemptedItems.stop();
+    }
+  }
+
+  private boolean checkItemMovedForRetry(Long item, long retryTimeout)
+      throws InterruptedException {
+    long stopTime = monotonicNow() + (retryTimeout * 2);
+    boolean isItemFound = false;
+    while (monotonicNow() < (stopTime)) {
+      Long ele = null;
+      while ((ele = unsatisfiedStorageMovementFiles.get()) != null) {
+        if (item.longValue() == ele.longValue()) {
+          isItemFound = true;
+          break;
+        }
+      }
+      if (!isItemFound) {
+        Thread.sleep(100);
+      } else {
+        break;
+      }
+    }
+    return isItemFound;
+  }
+
+  @Test(timeout = 30000)
+  public void testAddResultWithFailureResult() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
+    assertTrue(checkItemMovedForRetry(item, 200));
+  }
+
+  @Test(timeout = 30000)
+  public void testAddResultWithSucessResult() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item);
+    bsmAttemptedItems.addResults(
+        new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
+            item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
+    assertFalse(checkItemMovedForRetry(item, 200));
+  }
+
+  @Test(timeout = 30000)
+  public void testNoResultAdded() throws Exception {
+    Long item = new Long(1234);
+    bsmAttemptedItems.add(item);
+    // After selfretry timeout, it should be added back for retry
+    assertTrue(checkItemMovedForRetry(item, 600));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/054d0922/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index cbfdfc6..6f5c717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -174,8 +174,6 @@ public class TestStoragePolicySatisfier {
       waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
       waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
 
-      // TODO: Temporarily using the results from StoragePolicySatisfier class.
-      // This has to be revisited as part of HDFS-11029.
       waitForBlocksMovementResult(1, 30000);
     } finally {
       hdfsCluster.shutdown();
@@ -190,8 +188,10 @@ public class TestStoragePolicySatisfier {
       @Override
       public Boolean get() {
         LOG.info("expectedResultsCount={} actualResultsCount={}",
-            expectedResultsCount, sps.results.size());
-        return expectedResultsCount == sps.results.size();
+            expectedResultsCount,
+            sps.getAttemptedItemsMonitor().resultsCount());
+        return expectedResultsCount == sps.getAttemptedItemsMonitor()
+            .resultsCount();
       }
     }, 100, timeout);
   }
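
For reference, a condensed sketch of how the satisfier wires the new monitor together, pulled from the diffs above (an editorial illustration, not a drop-in class):

package org.apache.hadoop.hdfs.server.namenode;  // same package as the classes above

import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;

class SatisfierMonitorSketch {
  private final BlockStorageMovementNeeded needed =
      new BlockStorageMovementNeeded();
  // checkTimeout = 5 minutes, selfRetryTimeout = 30 minutes, as in the diff.
  private final BlockStorageMovementAttemptedItems monitor =
      new BlockStorageMovementAttemptedItems(5 * 60 * 1000, 30 * 60 * 1000,
          needed);

  void start() {
    monitor.start();             // starts the result/timeout monitor thread
  }

  void blockMovementTasksDispatched(long trackId) {
    monitor.add(trackId);        // remember when the work was sent to DNs
  }

  void onCoordinatorReport(BlocksStorageMovementResult[] results) {
    monitor.addResults(results); // SUCCESS drops the item, FAILURE re-queues it
  }

  void stop() {
    monitor.stop();
  }
}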




[44/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

Posted by ra...@apache.org.
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7367d541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7367d541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7367d541

Branch: refs/heads/HDFS-10285
Commit: 7367d54143593b15d6b9acc19c03080d84dac52d
Parents: 9538801
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:18 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/StoragePolicySatisfier.java | 53 +++++++++++---------
 .../namenode/TestStoragePolicySatisfier.java    |  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7367d541/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
    */
   public synchronized void start(boolean reconfigStart) {
     isRunning = true;
+    if (checkIfMoverRunning()) {
+      isRunning = false;
+      LOG.error(
+          "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+              + HdfsServerConstants.MOVER_ID_PATH.toString()
+              + " been opened. Maybe a Mover instance is running!");
+      return;
+    }
     if (reconfigStart) {
       LOG.info("Starting StoragePolicySatisfier, as admin requested to "
           + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-    boolean isMoverRunning = !checkIfMoverRunning();
-    synchronized (this) {
-      isRunning = isMoverRunning;
-      if (!isRunning) {
-        // Stopping monitor thread and clearing queues as well
-        this.clearQueues();
-        this.storageMovementsMonitor.stopGracefully();
-        LOG.error(
-            "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-                + HdfsServerConstants.MOVER_ID_PATH.toString()
-                + " been opened. Maybe a Mover instance is running!");
-        return;
-      }
-    }
     while (namesystem.isRunning() && isRunning) {
       try {
         if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
         // we want to check block movements.
         Thread.sleep(3000);
       } catch (Throwable t) {
-        synchronized (this) {
+        handleException(t);
+      }
+    }
+  }
+
+  private void handleException(Throwable t) {
+    // double check to avoid entering into synchronized block.
+    if (isRunning) {
+      synchronized (this) {
+        if (isRunning) {
           isRunning = false;
           // Stopping monitor thread and clearing queues as well
           this.clearQueues();
           this.storageMovementsMonitor.stopGracefully();
-        }
-        if (!namesystem.isRunning()) {
-          LOG.info("Stopping StoragePolicySatisfier.");
-          if (!(t instanceof InterruptedException)) {
-            LOG.info("StoragePolicySatisfier received an exception"
-                + " while shutting down.", t);
+          if (!namesystem.isRunning()) {
+            LOG.info("Stopping StoragePolicySatisfier.");
+            if (!(t instanceof InterruptedException)) {
+              LOG.info("StoragePolicySatisfier received an exception"
+                  + " while shutting down.", t);
+            }
+            return;
           }
-          break;
         }
-        LOG.error("StoragePolicySatisfier thread received runtime exception. "
-            + "Stopping Storage policy satisfier work", t);
-        break;
       }
     }
+    LOG.error("StoragePolicySatisfier thread received runtime exception. "
+        + "Stopping Storage policy satisfier work", t);
+    return;
   }
 
   private BlocksMovingAnalysisStatus analyseBlocksStorageMovementsAndAssignToDN(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7367d541/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 7127895..be7236b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -927,7 +927,8 @@ public class TestStoragePolicySatisfier {
       String fooDir = "/foo";
       client.mkdirs(fooDir, new FsPermission((short) 777), true);
       // set an EC policy on "/foo" directory
-      client.setErasureCodingPolicy(fooDir, null);
+      client.setErasureCodingPolicy(fooDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // write file to fooDir
       final String testFile = "/foo/bar";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7367d541/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index 195c9e3..f905ead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -323,6 +323,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
     conf.set(DFSConfigKeys
         .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
         "3000");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -346,7 +348,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
       Path barDir = new Path("/bar");
       fs.mkdirs(barDir);
       // set an EC policy on "/bar" directory
-      fs.setErasureCodingPolicy(barDir, null);
+      fs.setErasureCodingPolicy(barDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // write file to barDir
       final Path fooFile = new Path("/bar/foo");
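
For reference, the double-check idiom this patch applies in handleException(), shown in isolation (a generic editorial sketch, not the Hadoop class itself):

class DoubleCheckedShutdownSketch {
  private volatile boolean isRunning = true;

  void handleFailure(Throwable t) {
    if (isRunning) {               // cheap, unsynchronized pre-check
      synchronized (this) {
        if (isRunning) {           // re-check while holding the lock
          isRunning = false;
          cleanup();               // stop monitors, clear queues, etc.
        }
      }
    }
  }

  private void cleanup() {
    // Release resources exactly once here.
  }
}

Only the thread that flips the flag performs the cleanup, so concurrent failures from the run loop cannot clear the queues twice.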




[13/50] [abbrv] hadoop git commit: HDFS-10800: [SPS]: Daemon thread in Namenode to find blocks placed in other storage than what the policy specifies. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-10800: [SPS]: Daemon thread in Namenode to find blocks placed in other storage than what the policy specifies. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/567977be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/567977be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/567977be

Branch: refs/heads/HDFS-10285
Commit: 567977be6930cf2a4d56b5848d5da8314bf67633
Parents: 5af54db
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Fri Sep 23 13:41:29 2016 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:03 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    |  41 ++
 .../server/blockmanagement/BlockManager.java    |  20 +
 .../blockmanagement/DatanodeDescriptor.java     |  38 ++
 .../server/blockmanagement/DatanodeManager.java |   1 +
 .../datanode/StoragePolicySatisfyWorker.java    |  29 +-
 .../namenode/BlockStorageMovementNeeded.java    |  53 +++
 .../server/namenode/StoragePolicySatisfier.java | 397 +++++++++++++++++++
 .../protocol/BlockStorageMovementCommand.java   |  11 +-
 .../TestStoragePolicySatisfyWorker.java         |  24 +-
 .../namenode/TestStoragePolicySatisfier.java    | 209 ++++++++++
 10 files changed, 791 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 47e1c0d..506e28f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -48,6 +48,7 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Date;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -68,6 +69,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -1570,4 +1572,43 @@ public class DFSUtil {
         .createKeyProviderCryptoExtension(keyProvider);
     return cryptoProvider;
   }
+
+  /**
+   * Remove the overlap between the expected types and the existing types.
+   *
+   * @param expected
+   *          - Expected storage types list.
+   * @param existing
+   *          - Existing storage types list.
+   * @param ignoreNonMovable
+   *          ignore non-movable storage types by removing them from both
+   *          expected and existing storage type list to prevent non-movable
+   *          storage from being moved.
+   * @return true if the existing types or the expected types list is empty
+   *         after removing the overlap.
+   */
+  public static boolean removeOverlapBetweenStorageTypes(
+      List<StorageType> expected,
+      List<StorageType> existing, boolean ignoreNonMovable) {
+    for (Iterator<StorageType> i = existing.iterator(); i.hasNext();) {
+      final StorageType t = i.next();
+      if (expected.remove(t)) {
+        i.remove();
+      }
+    }
+    if (ignoreNonMovable) {
+      removeNonMovable(existing);
+      removeNonMovable(expected);
+    }
+    return expected.isEmpty() || existing.isEmpty();
+  }
+
+  private static void removeNonMovable(List<StorageType> types) {
+    for (Iterator<StorageType> i = types.iterator(); i.hasNext();) {
+      final StorageType t = i.next();
+      if (!t.isMovable()) {
+        i.remove();
+      }
+    }
+  }
 }
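
For reference, a small walk-through of removeOverlapBetweenStorageTypes added above (an editorial illustration, not part of the commit): with a COLD policy expecting three ARCHIVE replicas and one replica already on ARCHIVE, two moves remain.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtil;

public class RemoveOverlapExample {
  public static void main(String[] args) {
    List<StorageType> expected = new ArrayList<>(Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE));
    List<StorageType> existing = new ArrayList<>(Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE));

    boolean done = DFSUtil.removeOverlapBetweenStorageTypes(
        expected, existing, true /* ignoreNonMovable */);

    // done == false: two DISK replicas still need to move to ARCHIVE.
    // expected -> [ARCHIVE, ARCHIVE], existing -> [DISK, DISK]
    System.out.println(done + " " + expected + " " + existing);
  }
}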

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6129db8..fbf62c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,6 +89,8 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
+import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -415,6 +417,11 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
+  /** For satisfying block storage policies */
+  private final StoragePolicySatisfier sps;
+  private final BlockStorageMovementNeeded storageMovementNeeded =
+      new BlockStorageMovementNeeded();
+
   /** Minimum live replicas needed for the datanode to be transitioned
    * from ENTERING_MAINTENANCE to IN_MAINTENANCE.
    */
@@ -450,6 +457,7 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
         * 1000L);
 
+    sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this);
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
     this.maxCorruptFilesReturned = conf.getInt(
@@ -669,9 +677,11 @@ public class BlockManager implements BlockStatsMXBean {
     this.blockReportThread.start();
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
     bmSafeMode.activate(blockTotal);
+    sps.start();
   }
 
   public void close() {
+    sps.stop();
     bmSafeMode.close();
     try {
       redundancyThread.interrupt();
@@ -4832,4 +4842,14 @@ public class BlockManager implements BlockStatsMXBean {
     }
     return i;
   }
+
+  /**
+   * Set the file block collection whose blocks need storage movement.
+   *
+   * @param id
+   *          - file block collection id.
+   */
+  public void satisfyStoragePolicy(long id) {
+    storageMovementNeeded.add(id);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index d35894c..46a45ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -205,6 +207,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private final LightWeightHashSet<Block> invalidateBlocks =
       new LightWeightHashSet<>();
 
+  /** A queue of blocks whose storage placements are to be moved by this datanode. */
+  private final Queue<List<BlockMovingInfo>> storageMovementBlocks =
+      new LinkedList<>();
+
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
    * in case of errors (e.g. datanode does not report if an error occurs
@@ -1015,5 +1021,37 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
     return false;
   }
+
+  /**
+   * Add the block infos which need to move their storage locations.
+   *
+   * @param storageMismatchedBlocks
+   *          - storage mismatched block infos
+   */
+  public void addBlocksToMoveStorage(
+      List<BlockMovingInfo> storageMismatchedBlocks) {
+    storageMovementBlocks.offer(storageMismatchedBlocks);
+  }
+
+  /**
+   * @return block infos which need to move their storage locations.
+   */
+  public List<BlockMovingInfo> getBlocksToMoveStorages() {
+    return storageMovementBlocks.poll();
+  }
+
+  // TODO: we will remove this method once DN side handling integrated. We can
+  // convert the test to check real block movements instead of this ds.
+  @VisibleForTesting
+  public List<BlockMovingInfo> getStorageMovementPendingItems() {
+    List<BlockMovingInfo> flatList = new ArrayList<>();
+    Iterator<List<BlockMovingInfo>> iterator = storageMovementBlocks
+        .iterator();
+    while (iterator.hasNext()) {
+      List<BlockMovingInfo> next = iterator.next();
+      flatList.addAll(next);
+    }
+    return flatList;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c75bcea..13c730d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 6df4e81..fa408f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -125,7 +126,7 @@ public class StoragePolicySatisfyWorker {
     return moverThreadPool;
   }
 
-  public void processBlockMovingTasks(long trackID,
+  public void processBlockMovingTasks(long trackID, String blockPoolID,
       List<BlockMovingInfo> blockMovingInfos) {
     Future<Void> moveCallable = null;
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
@@ -133,13 +134,11 @@ public class StoragePolicySatisfyWorker {
           .getSources().length == blkMovingInfo.getTargets().length;
 
       for (int i = 0; i < blkMovingInfo.getSources().length; i++) {
-        BlockMovingTask blockMovingTask =
-            new BlockMovingTask(blkMovingInfo.getBlock(),
-            blkMovingInfo.getSources()[i],
-            blkMovingInfo.getTargets()[i],
+        BlockMovingTask blockMovingTask = new BlockMovingTask(
+            blkMovingInfo.getBlock(), blockPoolID,
+            blkMovingInfo.getSources()[i], blkMovingInfo.getTargets()[i],
             blkMovingInfo.getTargetStorageTypes()[i]);
-        moveCallable = moverExecutorCompletionService
-            .submit(blockMovingTask);
+        moveCallable = moverExecutorCompletionService.submit(blockMovingTask);
         moverTaskFutures.add(moveCallable);
       }
     }
@@ -163,14 +162,16 @@ public class StoragePolicySatisfyWorker {
    * given target.
    */
   private class BlockMovingTask implements Callable<Void> {
-    private final ExtendedBlock block;
+    private final Block block;
     private final DatanodeInfo source;
     private final DatanodeInfo target;
     private final StorageType targetStorageType;
+    private String blockPoolID;
 
-    BlockMovingTask(ExtendedBlock block, DatanodeInfo source,
+    BlockMovingTask(Block block, String blockPoolID, DatanodeInfo source,
         DatanodeInfo target, StorageType targetStorageType) {
       this.block = block;
+      this.blockPoolID = blockPoolID;
       this.source = source;
       this.target = target;
       this.targetStorageType = targetStorageType;
@@ -201,12 +202,12 @@ public class StoragePolicySatisfyWorker {
 
         OutputStream unbufOut = sock.getOutputStream();
         InputStream unbufIn = sock.getInputStream();
-
+        ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolID, block);
         Token<BlockTokenIdentifier> accessToken = datanode.getBlockAccessToken(
-            block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
+            extendedBlock, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
 
         DataEncryptionKeyFactory keyFactory = datanode
-            .getDataEncryptionKeyFactoryForBlock(block);
+            .getDataEncryptionKeyFactoryForBlock(extendedBlock);
         IOStreamPair saslStreams = datanode.getSaslClient().socketSend(sock,
             unbufOut, unbufIn, keyFactory, accessToken, target);
         unbufOut = saslStreams.out;
@@ -215,10 +216,10 @@ public class StoragePolicySatisfyWorker {
             new BufferedOutputStream(unbufOut, ioFileBufferSize));
         in = new DataInputStream(
             new BufferedInputStream(unbufIn, ioFileBufferSize));
-        sendRequest(out, block, accessToken, source, targetStorageType);
+        sendRequest(out, extendedBlock, accessToken, source, targetStorageType);
         receiveResponse(in);
 
-        LOG.debug(
+        LOG.info(
             "Successfully moved block:{} from src:{} to destin:{} for"
                 + " satisfying storageType:{}",
             block, source, target, targetStorageType);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
new file mode 100644
index 0000000..c916672
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.LinkedList;
+import java.util.Queue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A class to track the block collection IDs for which physical storage movement
+ * is needed, as determined from the Namespace and the StorageReports from DNs.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+  private final Queue<Long> storageMovementNeeded = new LinkedList<Long>();
+
+  /**
+   * Add the block collection id to the tracking list of IDs for which storage
+   * movement is expected, if necessary.
+   *
+   * @param blockCollectionID
+   *          - block collection id, which is nothing but inode id.
+   */
+  public synchronized void add(Long blockCollectionID) {
+    storageMovementNeeded.add(blockCollectionID);
+  }
+
+  /**
+   * Gets the next block collection id for which a storage movement check is
+   * necessary, so that the movement can be made if required.
+   *
+   * @return block collection ID
+   */
+  public synchronized Long get() {
+    return storageMovementNeeded.poll();
+  }
+}
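
The class above is the hand-off point between the client-facing satisfyStoragePolicy path and the satisfier thread. The following minimal sketch shows the intended producer/consumer usage; it is illustrative only, assumes a build that includes this patch on the classpath, and the inode ids are made-up values.

import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;

public class SatisfierQueueSketch {
  public static void main(String[] args) {
    BlockStorageMovementNeeded needed = new BlockStorageMovementNeeded();
    // Producer side: the namenode adds the inode id of each file whose
    // storage policy should be satisfied.
    needed.add(16386L);
    needed.add(16387L);
    // Consumer side: the StoragePolicySatisfier thread polls one id at a
    // time; get() returns null once nothing is pending.
    Long id;
    while ((id = needed.get()) != null) {
      System.out.println("Would analyse block collection " + id);
    }
  }
}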

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
new file mode 100644
index 0000000..b5aed37
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -0,0 +1,397 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.balancer.Matcher;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Setting a storage policy on a file after it has been written only updates the
+ * policy type in the Namespace; the physical block storage movement does not
+ * happen until the user runs the "Mover Tool" explicitly for such files. The
+ * StoragePolicySatisfier daemon thread addresses the case where users want
+ * HDFS itself to physically move the blocks instead of running the mover tool
+ * explicitly. Calling the client API satisfyStoragePolicy on a file/dir
+ * automatically triggers movement of its physical storage locations in an
+ * asynchronous manner. The Namenode picks the file blocks whose storages are
+ * expected to change, builds the mapping of source block locations to the
+ * expected storage types and target locations, and then this class prepares
+ * the commands sent to the Datanodes that process the physical block
+ * movements.
+ */
+@InterfaceAudience.Private
+public class StoragePolicySatisfier implements Runnable {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(StoragePolicySatisfier.class);
+  private Daemon storagePolicySatisfierThread;
+  private final Namesystem namesystem;
+  private final BlockManager blockManager;
+  private final BlockStorageMovementNeeded storageMovementNeeded;
+
+  public StoragePolicySatisfier(final Namesystem namesystem,
+      final BlockStorageMovementNeeded storageMovementNeeded,
+      final BlockManager blkManager) {
+    this.namesystem = namesystem;
+    this.storageMovementNeeded = storageMovementNeeded;
+    this.blockManager = blkManager;
+  }
+
+  /**
+   * Start the storage policy satisfier daemon thread.
+   */
+  public void start() {
+    storagePolicySatisfierThread = new Daemon(this);
+    storagePolicySatisfierThread.setName("StoragePolicySatisfier");
+    storagePolicySatisfierThread.start();
+  }
+
+  /**
+   * Stop the storage policy satisfier daemon thread.
+   */
+  public void stop() {
+    if (storagePolicySatisfierThread == null) {
+      return;
+    }
+    storagePolicySatisfierThread.interrupt();
+    try {
+      storagePolicySatisfierThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+  }
+
+  @Override
+  public void run() {
+    while (namesystem.isRunning()) {
+      try {
+        Long blockCollectionID = storageMovementNeeded.get();
+        if (blockCollectionID != null) {
+          computeAndAssignStorageMismatchedBlocksToDNs(blockCollectionID);
+        }
+        // TODO: Consider making this interval configurable later, i.e. how
+        // frequently we want to check for block movements.
+        Thread.sleep(3000);
+      } catch (Throwable t) {
+        if (!namesystem.isRunning()) {
+          LOG.info("Stopping StoragePolicySatisfier.");
+          if (!(t instanceof InterruptedException)) {
+            LOG.info("StoragePolicySatisfier received an exception"
+                + " while shutting down.", t);
+          }
+          break;
+        }
+        LOG.error("StoragePolicySatisfier thread received runtime exception. "
+            + "Stopping Storage policy satisfier work", t);
+        // TODO: Just break for now. Once we implement dynamic start/stop
+        // option, we can add conditions here when to break/terminate.
+        break;
+      }
+    }
+  }
+
+  private void computeAndAssignStorageMismatchedBlocksToDNs(
+      long blockCollectionID) {
+    BlockCollection blockCollection =
+        namesystem.getBlockCollection(blockCollectionID);
+    if (blockCollection == null) {
+      return;
+    }
+    byte existingStoragePolicyID = blockCollection.getStoragePolicyID();
+    BlockStoragePolicy existingStoragePolicy =
+        blockManager.getStoragePolicy(existingStoragePolicyID);
+    if (!blockCollection.getLastBlock().isComplete()) {
+      // Postpone: the file is currently under construction.
+      // TODO: should we add it back for retry, or leave that to the user?
+      return;
+    }
+
+    // First datanode will be chosen as the co-ordinator node for storage
+    // movements. Later this can be optimized if needed.
+    DatanodeDescriptor coordinatorNode = null;
+    BlockInfo[] blocks = blockCollection.getBlocks();
+    List<BlockMovingInfo> blockMovingInfos = new ArrayList<BlockMovingInfo>();
+    for (int i = 0; i < blocks.length; i++) {
+      BlockInfo blockInfo = blocks[i];
+      List<StorageType> expectedStorageTypes =
+          existingStoragePolicy.chooseStorageTypes(blockInfo.getReplication());
+      DatanodeStorageInfo[] storages = blockManager.getStorages(blockInfo);
+      StorageType[] storageTypes = new StorageType[storages.length];
+      for (int j = 0; j < storages.length; j++) {
+        DatanodeStorageInfo datanodeStorageInfo = storages[j];
+        StorageType storageType = datanodeStorageInfo.getStorageType();
+        storageTypes[j] = storageType;
+      }
+      List<StorageType> existing =
+          new LinkedList<StorageType>(Arrays.asList(storageTypes));
+      if (!DFSUtil.removeOverlapBetweenStorageTypes(expectedStorageTypes,
+          existing, true)) {
+        List<StorageTypeNodePair> sourceWithStorageMap =
+            new ArrayList<StorageTypeNodePair>();
+        List<DatanodeStorageInfo> existingBlockStorages =
+            new ArrayList<DatanodeStorageInfo>(Arrays.asList(storages));
+        for (StorageType existingType : existing) {
+          Iterator<DatanodeStorageInfo> iterator =
+              existingBlockStorages.iterator();
+          while (iterator.hasNext()) {
+            DatanodeStorageInfo datanodeStorageInfo = iterator.next();
+            StorageType storageType = datanodeStorageInfo.getStorageType();
+            if (storageType == existingType) {
+              iterator.remove();
+              sourceWithStorageMap.add(new StorageTypeNodePair(storageType,
+                  datanodeStorageInfo.getDatanodeDescriptor()));
+              break;
+            }
+          }
+        }
+
+        StorageTypeNodeMap locsForExpectedStorageTypes =
+            findTargetsForExpectedStorageTypes(expectedStorageTypes);
+
+        BlockMovingInfo blockMovingInfo =
+            findSourceAndTargetToMove(blockInfo, existing, sourceWithStorageMap,
+                expectedStorageTypes, locsForExpectedStorageTypes);
+        if (coordinatorNode == null) {
+          // For now, first datanode will be chosen as the co-ordinator. Later
+          // this can be optimized if needed.
+          coordinatorNode =
+              (DatanodeDescriptor) blockMovingInfo.getSources()[0];
+        }
+        blockMovingInfos.add(blockMovingInfo);
+      }
+    }
+
+    if (blockMovingInfos.size() < 1) {
+      // TODO: Major: handle this case. Retry logic needs to be implemented.
+      // The idea is that if some files do not get a chance at storage movement,
+      // we can retry a limited number of times and then exit.
+      return;
+    }
+    coordinatorNode.addBlocksToMoveStorage(blockMovingInfos);
+  }
+
+  /**
+   * Find a good target node for each source node whose block storage is
+   * misplaced.
+   *
+   * @param blockInfo
+   *          - Block
+   * @param existing
+   *          - Existing storage types of block
+   * @param sourceWithStorageList
+   *          - List of source Datanodes with their storage types
+   * @param expected
+   *          - Expected storage types to move the blocks to
+   * @param locsForExpectedStorageTypes
+   *          - Available DNs for expected storage types
+   * @return list of block source and target node pair
+   */
+  private BlockMovingInfo findSourceAndTargetToMove(BlockInfo blockInfo,
+      List<StorageType> existing,
+      List<StorageTypeNodePair> sourceWithStorageList,
+      List<StorageType> expected,
+      StorageTypeNodeMap locsForExpectedStorageTypes) {
+    List<DatanodeInfo> sourceNodes = new ArrayList<>();
+    List<StorageType> sourceStorageTypes = new ArrayList<>();
+    List<DatanodeInfo> targetNodes = new ArrayList<>();
+    List<StorageType> targetStorageTypes = new ArrayList<>();
+    List<DatanodeDescriptor> chosenNodes = new ArrayList<>();
+    for (int i = 0; i < sourceWithStorageList.size(); i++) {
+      StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
+      StorageTypeNodePair chosenTarget =
+          chooseTargetTypeInSameNode(existingTypeNodePair.dn, expected,
+              locsForExpectedStorageTypes, chosenNodes);
+
+      if (chosenTarget == null && blockManager.getDatanodeManager()
+          .getNetworkTopology().isNodeGroupAware()) {
+        chosenTarget = chooseTarget(blockInfo, existingTypeNodePair.dn,
+            expected, Matcher.SAME_NODE_GROUP, locsForExpectedStorageTypes,
+            chosenNodes);
+      }
+
+      // Then, match nodes on the same rack
+      if (chosenTarget == null) {
+        chosenTarget =
+            chooseTarget(blockInfo, existingTypeNodePair.dn, expected,
+                Matcher.SAME_RACK, locsForExpectedStorageTypes, chosenNodes);
+      }
+
+      if (chosenTarget == null) {
+        chosenTarget =
+            chooseTarget(blockInfo, existingTypeNodePair.dn, expected,
+                Matcher.ANY_OTHER, locsForExpectedStorageTypes, chosenNodes);
+      }
+      if (null != chosenTarget) {
+        sourceNodes.add(existingTypeNodePair.dn);
+        sourceStorageTypes.add(existingTypeNodePair.storageType);
+        targetNodes.add(chosenTarget.dn);
+        targetStorageTypes.add(chosenTarget.storageType);
+        chosenNodes.add(chosenTarget.dn);
+        // TODO: We can increment scheduled block count for this node?
+      } else {
+        // TODO: Failed to choose target nodes, so just retry for now. Should we
+        // proceed without these targets, and if so what should the final result
+        // be? One option is to pack an empty target, meaning no target node
+        // could be chosen, so the result from the DN is always RETRY_REQUIRED.
+        // Log: unable to choose a target node for this source datanode.
+        sourceNodes.add(existingTypeNodePair.dn);
+        sourceStorageTypes.add(existingTypeNodePair.storageType);
+        targetNodes.add(null);
+        targetStorageTypes.add(null);
+      }
+    }
+    BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blockInfo,
+        sourceNodes.toArray(new DatanodeInfo[sourceNodes.size()]),
+        targetNodes.toArray(new DatanodeInfo[targetNodes.size()]),
+        sourceStorageTypes.toArray(new StorageType[sourceStorageTypes.size()]),
+        targetStorageTypes.toArray(new StorageType[targetStorageTypes.size()]));
+    return blkMovingInfo;
+  }
+
+  /**
+   * Choose the target storage within the same Datanode, if possible.
+   *
+   * @param locsForExpectedStorageTypes
+   * @param chosenNodes
+   */
+  private StorageTypeNodePair chooseTargetTypeInSameNode(
+      DatanodeDescriptor source, List<StorageType> targetTypes,
+      StorageTypeNodeMap locsForExpectedStorageTypes,
+      List<DatanodeDescriptor> chosenNodes) {
+    for (StorageType t : targetTypes) {
+      DatanodeStorageInfo chooseStorage4Block =
+          source.chooseStorage4Block(t, 0);
+      if (chooseStorage4Block != null) {
+        return new StorageTypeNodePair(t, source);
+      }
+    }
+    return null;
+  }
+
+  private StorageTypeNodePair chooseTarget(Block block,
+      DatanodeDescriptor source, List<StorageType> targetTypes, Matcher matcher,
+      StorageTypeNodeMap locsForExpectedStorageTypes,
+      List<DatanodeDescriptor> chosenNodes) {
+    for (StorageType t : targetTypes) {
+      List<DatanodeDescriptor> nodesWithStorages =
+          locsForExpectedStorageTypes.getNodesWithStorages(t);
+      Collections.shuffle(nodesWithStorages);
+      for (DatanodeDescriptor target : nodesWithStorages) {
+        if (!chosenNodes.contains(target) && matcher.match(
+            blockManager.getDatanodeManager().getNetworkTopology(), source,
+            target)) {
+          if (null != target.chooseStorage4Block(t, block.getNumBytes())) {
+            return new StorageTypeNodePair(t, target);
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  private static class StorageTypeNodePair {
+    public StorageType storageType = null;
+    public DatanodeDescriptor dn = null;
+
+    public StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
+      this.storageType = storageType;
+      this.dn = dn;
+    }
+  }
+
+  private StorageTypeNodeMap findTargetsForExpectedStorageTypes(
+      List<StorageType> expected) {
+    StorageTypeNodeMap targetMap = new StorageTypeNodeMap();
+    List<DatanodeDescriptor> reports = blockManager.getDatanodeManager()
+        .getDatanodeListForReport(DatanodeReportType.LIVE);
+    for (DatanodeDescriptor dn : reports) {
+      StorageReport[] storageReports = dn.getStorageReports();
+      for (StorageReport storageReport : storageReports) {
+        StorageType t = storageReport.getStorage().getStorageType();
+        if (expected.contains(t)) {
+          final long maxRemaining = getMaxRemaining(dn.getStorageReports(), t);
+          if (maxRemaining > 0L) {
+            targetMap.add(t, dn);
+          }
+        }
+      }
+    }
+    return targetMap;
+  }
+
+  private static long getMaxRemaining(StorageReport[] storageReports,
+      StorageType t) {
+    long max = 0L;
+    for (StorageReport r : storageReports) {
+      if (r.getStorage().getStorageType() == t) {
+        if (r.getRemaining() > max) {
+          max = r.getRemaining();
+        }
+      }
+    }
+    return max;
+  }
+
+  private static class StorageTypeNodeMap {
+    private final EnumMap<StorageType, List<DatanodeDescriptor>> typeNodeMap =
+        new EnumMap<StorageType, List<DatanodeDescriptor>>(StorageType.class);
+
+    private void add(StorageType t, DatanodeDescriptor dn) {
+      List<DatanodeDescriptor> nodesWithStorages = getNodesWithStorages(t);
+      LinkedList<DatanodeDescriptor> value = null;
+      if (nodesWithStorages == null) {
+        value = new LinkedList<DatanodeDescriptor>();
+        value.add(dn);
+        typeNodeMap.put(t, value);
+      } else {
+        nodesWithStorages.add(dn);
+      }
+    }
+
+    /**
+     * @param type
+     *          - Storage type
+     * @return datanodes which have the given storage type
+     */
+    private List<DatanodeDescriptor> getNodesWithStorages(StorageType type) {
+      return typeNodeMap.get(type);
+    }
+  }
+}
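
The heart of findSourceAndTargetToMove above is the fallback order for picking a target: a storage on the same datanode first, then the same node group, then the same rack, and finally any other node. The self-contained sketch below restates only that ordering; the Candidate class, the node names and the helper are made up for illustration and are not part of the patch.

import java.util.Arrays;
import java.util.List;

public class TargetFallbackSketch {

  static class Candidate {
    final String node;
    final String nodeGroup;
    final String rack;
    Candidate(String node, String nodeGroup, String rack) {
      this.node = node;
      this.nodeGroup = nodeGroup;
      this.rack = rack;
    }
    @Override
    public String toString() {
      return node;
    }
  }

  // Mirrors the satisfier's order: same node, same node group, same rack,
  // and finally any other candidate.
  static Candidate chooseTarget(Candidate source, List<Candidate> candidates) {
    for (Candidate t : candidates) {
      if (t.node.equals(source.node)) {
        return t;
      }
    }
    for (Candidate t : candidates) {
      if (t.nodeGroup.equals(source.nodeGroup)) {
        return t;
      }
    }
    for (Candidate t : candidates) {
      if (t.rack.equals(source.rack)) {
        return t;
      }
    }
    return candidates.isEmpty() ? null : candidates.get(0);
  }

  public static void main(String[] args) {
    Candidate source = new Candidate("dn1", "group1", "rack1");
    List<Candidate> candidates = Arrays.asList(
        new Candidate("dn7", "group3", "rack2"),
        new Candidate("dn2", "group1", "rack1"));
    // Prints dn2: no same-node candidate exists, but dn2 shares the node group.
    System.out.println(chooseTarget(source, candidates));
  }
}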

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
index 42ba265..c1ab800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.protocol;
 import java.util.Arrays;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 
 /**
  * A BlockStorageMovementCommand is an instruction to a DataNode to move the
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
  * NameNode about the movement status.
  */
 public class BlockStorageMovementCommand extends DatanodeCommand {
-
   // TODO: constructor needs to be refined based on the block movement data
   // structure.
   BlockStorageMovementCommand(int action) {
@@ -46,13 +45,13 @@ public class BlockStorageMovementCommand extends DatanodeCommand {
    * Stores block to storage info that can be used for block movement.
    */
   public static class BlockMovingInfo {
-    private ExtendedBlock blk;
+    private Block blk;
     private DatanodeInfo[] sourceNodes;
     private StorageType[] sourceStorageTypes;
     private DatanodeInfo[] targetNodes;
     private StorageType[] targetStorageTypes;
 
-    public BlockMovingInfo(ExtendedBlock block,
+    public BlockMovingInfo(Block block,
         DatanodeInfo[] sourceDnInfos, DatanodeInfo[] targetDnInfos,
         StorageType[] srcStorageTypes, StorageType[] targetStorageTypes) {
       this.blk = block;
@@ -62,11 +61,11 @@ public class BlockStorageMovementCommand extends DatanodeCommand {
       this.targetStorageTypes = targetStorageTypes;
     }
 
-    public void addBlock(ExtendedBlock block) {
+    public void addBlock(Block block) {
       this.blk = block;
     }
 
-    public ExtendedBlock getBlock() {
+    public Block getBlock() {
       return this.blk;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index c722306..d803f1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
@@ -71,14 +71,14 @@ public class TestStoragePolicySatisfyWorker {
   public void testMoveSingleBlockToAnotherDatanode() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4)
-        .storageTypes(
-            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.DISK, StorageType.ARCHIVE}})
-        .build();
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(4)
+            .storageTypes(
+                new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+                    {StorageType.DISK, StorageType.ARCHIVE},
+                    {StorageType.ARCHIVE, StorageType.ARCHIVE},
+                    {StorageType.ARCHIVE, StorageType.ARCHIVE}})
+            .build();
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
@@ -108,12 +108,12 @@ public class TestStoragePolicySatisfyWorker {
           src);
       List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
       BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
-          lb.getBlock(), lb.getLocations()[0], targetDnInfo,
+          lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
           lb.getStorageTypes()[0], StorageType.ARCHIVE);
       blockMovingInfos.add(blockMovingInfo);
       INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
       worker.processBlockMovingTasks(inode.getId(),
-          blockMovingInfos);
+          cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
       cluster.triggerHeartbeats();
 
       // Wait till NameNode notified about the block location details
@@ -150,7 +150,7 @@ public class TestStoragePolicySatisfyWorker {
     }, 100, timeout);
   }
 
-  BlockMovingInfo prepareBlockMovingInfo(ExtendedBlock block,
+  BlockMovingInfo prepareBlockMovingInfo(Block block,
       DatanodeInfo src, DatanodeInfo destin, StorageType storageType,
       StorageType targetStorageType) {
     return new BlockMovingInfo(block, new DatanodeInfo[] {src},

http://git-wip-us.apache.org/repos/asf/hadoop/blob/567977be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
new file mode 100644
index 0000000..b61814d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to identify the blocks
+ * to be moved and find suggested target locations for them.
+ */
+public class TestStoragePolicySatisfier {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+      new StorageType[][]{{StorageType.DISK, StorageType.DISK},
+          {StorageType.DISK, StorageType.DISK},
+          {StorageType.DISK, StorageType.DISK}};
+  private MiniDFSCluster hdfsCluster = null;
+  final private int numOfDatanodes = 3;
+  final private int storagesPerDatanode = 2;
+  final private long capacity = 2 * 256 * 1024 * 1024;
+  final private String file = "/testMoveWhenStoragePolicyNotSatisfying";
+  private DistributedFileSystem distributedFS = null;
+
+  @Before
+  public void setUp() throws IOException {
+    config.setLong("dfs.block.size", 1024);
+    hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
+        storagesPerDatanode, capacity);
+    distributedFS = hdfsCluster.getFileSystem();
+    writeContent(distributedFS, file);
+  }
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToCOLD()
+      throws Exception {
+
+    try {
+      // Change policy to COLD
+      distributedFS.setStoragePolicy(new Path(file), "COLD");
+      Set<DatanodeDescriptor> previousNodes =
+          hdfsCluster.getNameNode().getNamesystem().getBlockManager()
+              .getDatanodeManager().getDatanodes();
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the namenode is notified about the block location details
+      waitExpectedStorageType(StorageType.ARCHIVE, distributedFS, previousNodes,
+          6, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToALLSSD()
+      throws Exception {
+    try {
+      // Change policy to ALL_SSD
+      distributedFS.setStoragePolicy(new Path(file), "ALL_SSD");
+      Set<DatanodeDescriptor> previousNodes =
+          hdfsCluster.getNameNode().getNamesystem().getBlockManager()
+              .getDatanodeManager().getDatanodes();
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK},
+              {StorageType.SSD, StorageType.DISK},
+              {StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD-based nodes are added to the cluster by adding SSD-based
+      // datanodes.
+      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified the blocks to move
+      // to SSD storage
+      waitExpectedStorageType(StorageType.SSD, distributedFS, previousNodes, 6,
+          30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void writeContent(final DistributedFileSystem dfs,
+      final String fileName) throws IOException {
+    // write to DISK
+    final FSDataOutputStream out = dfs.create(new Path(fileName));
+    for (int i = 0; i < 1000; i++) {
+      out.writeChars("t");
+    }
+    out.close();
+  }
+
+  private void startAdditionalDNs(final Configuration conf,
+      int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
+      int storagesPerDatanode, long capacity, final MiniDFSCluster cluster)
+          throws IOException {
+    long[][] capacities;
+    existingNodesNum += newNodesRequired;
+    capacities = new long[newNodesRequired][storagesPerDatanode];
+    for (int i = 0; i < newNodesRequired; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
+        null, capacities, null, false, false, false, null);
+    cluster.triggerHeartbeats();
+  }
+
+  private MiniDFSCluster startCluster(final Configuration conf,
+      StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
+      long nodeCapacity) throws IOException {
+    long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
+    for (int i = 0; i < numberOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
+      }
+    }
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numberOfDatanodes).storagesPerDatanode(storagesPerDn)
+        .storageTypes(storageTypes).storageCapacities(capacities).build();
+    cluster.waitActive();
+    return cluster;
+  }
+
+  // TODO: this assertion can be changed to an end-to-end assertion later,
+  // once the DN-side processing work is integrated with this work.
+  private void waitExpectedStorageType(final StorageType expectedStorageType,
+      final DistributedFileSystem dfs,
+      final Set<DatanodeDescriptor> previousNodes, int expectedArchiveCount,
+      int timeout) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        Iterator<DatanodeDescriptor> iterator = previousNodes.iterator();
+        int archiveCount = 0;
+        while (iterator.hasNext()) {
+          DatanodeDescriptor dn = iterator.next();
+          List<BlockMovingInfo> pendingItemsToMove =
+              dn.getStorageMovementPendingItems();
+          for (BlockMovingInfo blkInfoToMoveStorage : pendingItemsToMove) {
+            StorageType[] targetStorageTypes =
+                blkInfoToMoveStorage.getTargetStorageTypes();
+            for (StorageType storageType : targetStorageTypes) {
+              if (storageType == expectedStorageType) {
+                archiveCount++;
+              }
+            }
+          }
+        }
+        LOG.info(
+            expectedStorageType + " replica count, expected={} and actual={}",
+            expectedArchiveCount, archiveCount);
+        return expectedArchiveCount == archiveCount;
+      }
+    }, 100, timeout);
+  }
+}



[46/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebb0430a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebb0430a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebb0430a

Branch: refs/heads/HDFS-10285
Commit: ebb0430a5237574aad37d276672a2e4117bfc4ec
Parents: 3e6bc73
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:26 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +++++----
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 ++++++++--------
 2 files changed, 13 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebb0430a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
       list.add(cluster.stopDataNode(0));
       list.add(cluster.stopDataNode(0));
       cluster.restartNameNodes();
-      cluster.restartDataNode(list.get(0), true);
-      cluster.restartDataNode(list.get(1), true);
+      cluster.restartDataNode(list.get(0), false);
+      cluster.restartDataNode(list.get(1), false);
       cluster.waitActive();
       fs.satisfyStoragePolicy(filePath);
-      Thread.sleep(3000 * 6);
-      cluster.restartDataNode(list.get(2), true);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());
+      cluster.restartDataNode(list.get(2), false);
       DFSTestUtil.waitExpectedStorageType(filePath.toString(),
           StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebb0430a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
    */
   @Test(timeout = 300000)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-    // start 10 datanodes
-    int numOfDatanodes = 10;
+    // start 9 datanodes
+    int numOfDatanodes = 9;
     int storagesPerDatanode = 2;
     long capacity = 20 * defaultStripeBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
-            {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE}})
         .storageCapacities(capacities)
         .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
       }
       cluster.restartNameNodes();
       // Restart half datanodes
-      for (int i = 0; i < numOfDatanodes / 2; i++) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = 0; i < 5; i++) {
+        cluster.restartDataNode(list.get(i), false);
       }
       cluster.waitActive();
       fs.satisfyStoragePolicy(fooFile);
-      Thread.sleep(3000 * 6);
+      DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+          StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());
       // Start remaining datanodes
-      for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = numOfDatanodes - 1; i >= 5; i--) {
+        cluster.restartDataNode(list.get(i), false);
       }
       // verify storage types and locations.
       waitExpectedStorageType(cluster, fooFile.toString(), fileLen,



[36/50] [abbrv] hadoop git commit: HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy for a file. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy for a file. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b03ec6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b03ec6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b03ec6e

Branch: refs/heads/HDFS-10285
Commit: 0b03ec6ee7e6afe2776e99d5e6dc2a00bc72cff4
Parents: ade0d04
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Thu Apr 20 23:14:36 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:41 2017 +0530

----------------------------------------------------------------------
 .../BlockStorageMovementAttemptedItems.java     |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 116 ++++++++++++++-----
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  35 ++++++
 .../TestPersistentStoragePolicySatisfier.java   |  52 +++++----
 .../namenode/TestStoragePolicySatisfier.java    |  76 ++++++++++++
 5 files changed, 225 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b03ec6e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f2406da..bf7859c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -333,7 +333,7 @@ public class BlockStorageMovementAttemptedItems {
                   + "doesn't exists in storageMovementAttemptedItems list",
                   storageMovementAttemptedResult.getTrackId());
               // Remove xattr for the track id.
-              this.sps.notifyBlkStorageMovementFinished(
+              this.sps.postBlkStorageMovementCleanup(
                   storageMovementAttemptedResult.getTrackId());
             }
             break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b03ec6e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 8be0a2a..3b20314 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -79,6 +79,27 @@ public class StoragePolicySatisfier implements Runnable {
   private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
   private volatile boolean isRunning = false;
 
+  /**
+   * Represents the collective analysis status for all blocks.
+   */
+  private enum BlocksMovingAnalysisStatus {
+    // Represents that the analysis was skipped due to some condition. One such
+    // condition is the block collection being in an incomplete state.
+    ANALYSIS_SKIPPED_FOR_RETRY,
+    // Represents that all blocks needing storage movement found their
+    // targets.
+    ALL_BLOCKS_TARGETS_PAIRED,
+    // Represents that only some (or none) of the blocks needing storage
+    // movement found eligible targets.
+    FEW_BLOCKS_TARGETS_PAIRED,
+    // Represents that none of the blocks require storage movement.
+    BLOCKS_ALREADY_SATISFIED,
+    // Represents that the analysis was skipped due to some condition.
+    // Example conditions are when no blocks exist in the block collection, or
+    // when analysis is not required for EC files with unsuitable storage policies.
+    BLOCKS_TARGET_PAIRING_SKIPPED;
+  }
+
   public StoragePolicySatisfier(final Namesystem namesystem,
       final BlockStorageMovementNeeded storageMovementNeeded,
       final BlockManager blkManager, Configuration conf) {
@@ -208,10 +229,31 @@ public class StoragePolicySatisfier implements Runnable {
                 namesystem.getBlockCollection(blockCollectionID);
             // Check blockCollectionId existence.
             if (blockCollection != null) {
-              boolean allBlockLocsAttemptedToSatisfy =
-                  computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
-              this.storageMovementsMonitor
-                  .add(blockCollectionID, allBlockLocsAttemptedToSatisfy);
+              BlocksMovingAnalysisStatus status =
+                  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
+              switch (status) {
+              // Just add to monitor, so it will be retried after timeout
+              case ANALYSIS_SKIPPED_FOR_RETRY:
+                // Just add to monitor, so it will be tracked for result and
+                // be removed on successful storage movement result.
+              case ALL_BLOCKS_TARGETS_PAIRED:
+                this.storageMovementsMonitor.add(blockCollectionID, true);
+                break;
+              // Add to monitor with the allBlocksAttemptedToSatisfy flag set to
+              // false, so that it will be tracked and still considered for
+              // retry, since the analysis did not find targets for all blocks.
+              case FEW_BLOCKS_TARGETS_PAIRED:
+                this.storageMovementsMonitor.add(blockCollectionID, false);
+                break;
+              // Just clean Xattrs
+              case BLOCKS_TARGET_PAIRING_SKIPPED:
+              case BLOCKS_ALREADY_SATISFIED:
+              default:
+                LOG.info("Block analysis skipped or blocks already satisfied"
+                    + " with storages. So, Cleaning up the Xattrs.");
+                postBlkStorageMovementCleanup(blockCollectionID);
+                break;
+              }
             }
           }
         }
@@ -235,15 +277,15 @@ public class StoragePolicySatisfier implements Runnable {
         }
         LOG.error("StoragePolicySatisfier thread received runtime exception. "
             + "Stopping Storage policy satisfier work", t);
-        // TODO: Just break for now. Once we implement dynamic start/stop
-        // option, we can add conditions here when to break/terminate.
         break;
       }
     }
   }
 
-  private boolean computeAndAssignStorageMismatchedBlocksToDNs(
+  private BlocksMovingAnalysisStatus analyseBlocksStorageMovementsAndAssignToDN(
       BlockCollection blockCollection) {
+    BlocksMovingAnalysisStatus status =
+        BlocksMovingAnalysisStatus.BLOCKS_ALREADY_SATISFIED;
     byte existingStoragePolicyID = blockCollection.getStoragePolicyID();
     BlockStoragePolicy existingStoragePolicy =
         blockManager.getStoragePolicy(existingStoragePolicyID);
@@ -252,21 +294,20 @@ public class StoragePolicySatisfier implements Runnable {
       // So, should we add back? or leave it to user
       LOG.info("BlockCollectionID: {} file is under construction. So, postpone"
           + " this to the next retry iteration", blockCollection.getId());
-      return true;
+      return BlocksMovingAnalysisStatus.ANALYSIS_SKIPPED_FOR_RETRY;
     }
 
     // First datanode will be chosen as the co-ordinator node for storage
     // movements. Later this can be optimized if needed.
     DatanodeDescriptor coordinatorNode = null;
     BlockInfo[] blocks = blockCollection.getBlocks();
+    if (blocks.length == 0) {
+      LOG.info("BlockCollectionID: {} file is not having any blocks."
+          + " So, skipping the analysis.", blockCollection.getId());
+      return BlocksMovingAnalysisStatus.BLOCKS_TARGET_PAIRING_SKIPPED;
+    }
     List<BlockMovingInfo> blockMovingInfos = new ArrayList<BlockMovingInfo>();
 
-    // True value represents that, SPS is able to find matching target nodes
-    // to satisfy storage type for all the blocks locations of the given
-    // blockCollection. A false value represents that, blockCollection needed
-    // retries to satisfy the storage policy for some of the block locations.
-    boolean foundMatchingTargetNodesForAllBlocks = true;
-
     for (int i = 0; i < blocks.length; i++) {
       BlockInfo blockInfo = blocks[i];
       List<StorageType> expectedStorageTypes;
@@ -283,19 +324,38 @@ public class StoragePolicySatisfier implements Runnable {
           LOG.warn("The storage policy " + existingStoragePolicy.getName()
               + " is not suitable for Striped EC files. "
               + "So, ignoring to move the blocks");
-          return false;
+          return BlocksMovingAnalysisStatus.BLOCKS_TARGET_PAIRING_SKIPPED;
         }
       } else {
         expectedStorageTypes = existingStoragePolicy
             .chooseStorageTypes(blockInfo.getReplication());
       }
-      foundMatchingTargetNodesForAllBlocks |= computeBlockMovingInfos(
-          blockMovingInfos, blockInfo, expectedStorageTypes);
+
+      DatanodeStorageInfo[] storages = blockManager.getStorages(blockInfo);
+      StorageType[] storageTypes = new StorageType[storages.length];
+      for (int j = 0; j < storages.length; j++) {
+        DatanodeStorageInfo datanodeStorageInfo = storages[j];
+        StorageType storageType = datanodeStorageInfo.getStorageType();
+        storageTypes[j] = storageType;
+      }
+      List<StorageType> existing =
+          new LinkedList<StorageType>(Arrays.asList(storageTypes));
+      if (!DFSUtil.removeOverlapBetweenStorageTypes(expectedStorageTypes,
+          existing, true)) {
+        boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
+            blockInfo, expectedStorageTypes, existing, storages);
+        if (computeStatus
+            && status != BlocksMovingAnalysisStatus.FEW_BLOCKS_TARGETS_PAIRED) {
+          status = BlocksMovingAnalysisStatus.ALL_BLOCKS_TARGETS_PAIRED;
+        } else {
+          status = BlocksMovingAnalysisStatus.FEW_BLOCKS_TARGETS_PAIRED;
+        }
+      }
     }
 
     assignBlockMovingInfosToCoordinatorDn(blockCollection.getId(),
         blockMovingInfos, coordinatorNode);
-    return foundMatchingTargetNodesForAllBlocks;
+    return status;
   }
 
   /**
@@ -311,22 +371,18 @@ public class StoragePolicySatisfier implements Runnable {
    *          - block details
    * @param expectedStorageTypes
    *          - list of expected storage type to satisfy the storage policy
+   * @param existing
+   *          - list to get existing storage types
+   * @param storages
+   *          - available storages
    * @return false if some of the block locations failed to find target node to
    *         satisfy the storage policy, true otherwise
    */
   private boolean computeBlockMovingInfos(
       List<BlockMovingInfo> blockMovingInfos, BlockInfo blockInfo,
-      List<StorageType> expectedStorageTypes) {
+      List<StorageType> expectedStorageTypes, List<StorageType> existing,
+      DatanodeStorageInfo[] storages) {
     boolean foundMatchingTargetNodesForBlock = true;
-    DatanodeStorageInfo[] storages = blockManager.getStorages(blockInfo);
-    StorageType[] storageTypes = new StorageType[storages.length];
-    for (int j = 0; j < storages.length; j++) {
-      DatanodeStorageInfo datanodeStorageInfo = storages[j];
-      StorageType storageType = datanodeStorageInfo.getStorageType();
-      storageTypes[j] = storageType;
-    }
-    List<StorageType> existing =
-        new LinkedList<StorageType>(Arrays.asList(storageTypes));
     if (!DFSUtil.removeOverlapBetweenStorageTypes(expectedStorageTypes,
         existing, true)) {
       List<StorageTypeNodePair> sourceWithStorageMap =
@@ -756,7 +812,7 @@ public class StoragePolicySatisfier implements Runnable {
     Long id;
     while ((id = storageMovementNeeded.get()) != null) {
       try {
-        notifyBlkStorageMovementFinished(id);
+        postBlkStorageMovementCleanup(id);
       } catch (IOException ie) {
         LOG.warn("Failed to remove SPS "
             + "xattr for collection id " + id, ie);
@@ -771,7 +827,7 @@ public class StoragePolicySatisfier implements Runnable {
    * @param trackId track id i.e., block collection id.
    * @throws IOException
    */
-  public void notifyBlkStorageMovementFinished(long trackId)
+  public void postBlkStorageMovementCleanup(long trackId)
       throws IOException {
     this.namesystem.getFSDirectory().removeSPSXattr(trackId);
   }
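
The switch over BlocksMovingAnalysisStatus above implements a simple policy: outcomes where targets were paired (fully or partially) stay under the retry monitor, while already-satisfied or skipped outcomes trigger immediate xattr cleanup. The sketch below restates that mapping in isolation; the enum constants follow the patch, but the Action type and the decide() helper are invented for illustration.

public class AnalysisStatusSketch {

  enum BlocksMovingAnalysisStatus {
    ANALYSIS_SKIPPED_FOR_RETRY,
    ALL_BLOCKS_TARGETS_PAIRED,
    FEW_BLOCKS_TARGETS_PAIRED,
    BLOCKS_ALREADY_SATISFIED,
    BLOCKS_TARGET_PAIRING_SKIPPED
  }

  enum Action { MONITOR_ALL_ATTEMPTED, MONITOR_FOR_RETRY, CLEAN_XATTR }

  static Action decide(BlocksMovingAnalysisStatus status) {
    switch (status) {
    case ANALYSIS_SKIPPED_FOR_RETRY:
    case ALL_BLOCKS_TARGETS_PAIRED:
      // Track the id; it is removed on a successful movement result.
      return Action.MONITOR_ALL_ATTEMPTED;
    case FEW_BLOCKS_TARGETS_PAIRED:
      // Track with the flag set to false so the id is considered for retry.
      return Action.MONITOR_FOR_RETRY;
    default:
      // BLOCKS_ALREADY_SATISFIED or BLOCKS_TARGET_PAIRING_SKIPPED:
      // nothing to move, so remove the satisfy-storage-policy xattr.
      return Action.CLEAN_XATTR;
    }
  }

  public static void main(String[] args) {
    for (BlocksMovingAnalysisStatus s : BlocksMovingAnalysisStatus.values()) {
      System.out.println(s + " -> " + decide(s));
    }
  }
}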

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b03ec6e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index aea4dac..93b7169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -99,8 +99,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -150,9 +153,12 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.XAttrStorage;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -168,6 +174,7 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -2322,4 +2329,32 @@ public class DFSTestUtil {
       }
     }, 500, timeout);
   }
+
+  /**
+   * Waits for removal of a specified Xattr on a specified file.
+   *
+   * @param srcPath
+   *          file name.
+   * @param xattr
+   *          name of the extended attribute.
+   * @param ns
+   *          Namesystem
+   * @param timeout
+   *          max wait time
+   * @throws Exception
+   */
+  public static void waitForXattrRemoved(String srcPath, String xattr,
+      Namesystem ns, int timeout) throws TimeoutException, InterruptedException,
+          UnresolvedLinkException, AccessControlException,
+          ParentNotDirectoryException {
+    final INode inode = ns.getFSDirectory().getINode(srcPath);
+    final XAttr satisfyXAttr = XAttrHelper.buildXAttr(xattr);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+        return !existingXAttrs.contains(satisfyXAttr);
+      }
+    }, 100, timeout);
+  }
 }
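
For reference, the helper is meant to be used from tests roughly as follows (a minimal sketch: the path is illustrative, while the xattr constant and the 30-second timeout mirror the call sites added later in this patch):

    // Block until the satisfy-storage-policy xattr disappears, or fail after 30s.
    DFSTestUtil.waitForXattrRemoved("/testFile", XATTR_SATISFY_STORAGE_POLICY,
        cluster.getNamesystem(), 30000);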

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b03ec6e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 8c3359a..41c272c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -20,22 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.List;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
 
 /**
  * Test persistence of satisfying files/directories.
@@ -341,15 +337,9 @@ public class TestPersistentStoragePolicySatisfier {
       DFSTestUtil.waitExpectedStorageType(
           testFileName, StorageType.DISK, 2, timeout, fs);
 
-      // Make sure that SPS xattr has been removed.
-      int retryTime = 0;
-      while (retryTime < 30) {
-        if (!fileContainsSPSXAttr(testFile)) {
-          break;
-        }
-        Thread.sleep(minCheckTimeout);
-        retryTime += 1;
-      }
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved(testFileName,
+          XATTR_SATISFY_STORAGE_POLICY, cluster.getNamesystem(), 30000);
 
       fs.setStoragePolicy(testFile, COLD);
       fs.satisfyStoragePolicy(testFile);
@@ -379,7 +369,8 @@ public class TestPersistentStoragePolicySatisfier {
       cluster.getNamesystem().getBlockManager().deactivateSPS();
 
       // Make sure satisfy xattr has been removed.
-      assertFalse(fileContainsSPSXAttr(testFile));
+      DFSTestUtil.waitForXattrRemoved(testFileName,
+          XATTR_SATISFY_STORAGE_POLICY, cluster.getNamesystem(), 30000);
 
     } finally {
       clusterShutdown();
@@ -387,18 +378,29 @@ public class TestPersistentStoragePolicySatisfier {
   }
 
   /**
-   * Check whether file contains SPS xattr.
-   * @param fileName file name.
-   * @return true if file contains SPS xattr.
-   * @throws IOException
+   * Tests that Xattrs should be cleaned if all blocks are already satisfied.
+   *
+   * @throws Exception
    */
-  private boolean fileContainsSPSXAttr(Path fileName) throws IOException {
-    final INode inode = cluster.getNamesystem()
-        .getFSDirectory().getINode(fileName.toString());
-    final XAttr satisfyXAttr =
-        XAttrHelper.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
-    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
-    return existingXAttrs.contains(satisfyXAttr);
+  @Test(timeout = 300000)
+  public void testSPSShouldNotLeakXattrIfStorageAlreadySatisfied()
+      throws Exception {
+    try {
+      clusterSetUp();
+      DFSTestUtil.waitExpectedStorageType(testFileName, StorageType.DISK, 3,
+          timeout, fs);
+      fs.satisfyStoragePolicy(testFile);
+
+      DFSTestUtil.waitExpectedStorageType(testFileName, StorageType.DISK, 3,
+          timeout, fs);
+
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved(testFileName,
+          XATTR_SATISFY_STORAGE_POLICY, cluster.getNamesystem(), 30000);
+
+    } finally {
+      clusterShutdown();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b03ec6e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 2a33455..8457e5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.junit.Assert.assertNull;
 
 import java.io.FileNotFoundException;
@@ -34,13 +35,17 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -828,6 +833,77 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests that Xattrs should be cleaned if satisfyStoragePolicy is called on an
+   * EC file with an unsuitable storage policy set.
+   *
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
+      throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD}};
+
+    int defaultStripedBlockSize =
+        ErasureCodingPolicyManager.getSystemPolicies()[0].getCellSize() * 4;
+    config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
+    config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    config.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1L);
+    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        false);
+
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          storagesPerDatanode, capacity);
+
+      // set "/foo" directory with ONE_SSD storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(config,
+          hdfsCluster.getFileSystem(0).getUri(), ClientProtocol.class)
+          .getProxy();
+      String fooDir = "/foo";
+      client.mkdirs(fooDir, new FsPermission((short) 777), true);
+      // set an EC policy on "/foo" directory
+      client.setErasureCodingPolicy(fooDir, null);
+
+      // write file to fooDir
+      final String testFile = "/foo/bar";
+      long fileLen = 20 * defaultStripedBlockSize;
+      dfs = hdfsCluster.getFileSystem();
+      DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short) 3, 0);
+
+      // ONESSD is an unsuitable storage policy for EC files
+      client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+      dfs.satisfyStoragePolicy(new Path(testFile));
+
+      // Thread.sleep(9000); // To make sure SPS triggered
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks =
+          client.getBlockLocations(testFile, 0, fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved(testFile, XATTR_SATISFY_STORAGE_POLICY,
+          hdfsCluster.getNamesystem(), 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();




[09/50] [abbrv] hadoop git commit: HDFS-11986. Dfsadmin should report erasure coding related information separately.

Posted by ra...@apache.org.
HDFS-11986. Dfsadmin should report erasure coding related information separately.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b89ffcff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b89ffcff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b89ffcff

Branch: refs/heads/HDFS-10285
Commit: b89ffcff362a872013f5d96c1fb76e0731402db4
Parents: 36bada3
Author: Manoj Govindassamy <ma...@apache.org>
Authored: Fri Aug 25 17:21:56 2017 -0700
Committer: Manoj Govindassamy <ma...@apache.org>
Committed: Fri Aug 25 17:21:56 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  36 +++-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 182 ++++++++++++++-----
 2 files changed, 160 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b89ffcff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1fb1d5f..88aafe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -66,11 +66,13 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -532,16 +534,30 @@ public class DFSAdmin extends FsShell {
      * minutes. Use "-metaSave" to list of all such blocks and accurate 
      * counts.
      */
-    System.out.println("Under replicated blocks: " + 
-                       dfs.getLowRedundancyBlocksCount());
-    System.out.println("Blocks with corrupt replicas: " + 
-                       dfs.getCorruptBlocksCount());
-    System.out.println("Missing blocks: " + 
-                       dfs.getMissingBlocksCount());
-    System.out.println("Missing blocks (with replication factor 1): " +
-                      dfs.getMissingReplOneBlocksCount());
-    System.out.println("Pending deletion blocks: " +
-        dfs.getPendingDeletionBlocksCount());
+    BlocksStats blocksStats = dfs.getClient().getNamenode().getBlocksStats();
+    System.out.println("Replicated Blocks:");
+    System.out.println("\tUnder replicated blocks: " +
+        blocksStats.getLowRedundancyBlocksStat());
+    System.out.println("\tBlocks with corrupt replicas: " +
+        blocksStats.getCorruptBlocksStat());
+    System.out.println("\tMissing blocks: " +
+        blocksStats.getMissingReplicaBlocksStat());
+    System.out.println("\tMissing blocks (with replication factor 1): " +
+        blocksStats.getMissingReplicationOneBlocksStat());
+    System.out.println("\tPending deletion blocks: " +
+        blocksStats.getPendingDeletionBlocksStat());
+
+    ECBlockGroupsStats ecBlockGroupsStats =
+        dfs.getClient().getNamenode().getECBlockGroupsStats();
+    System.out.println("Erasure Coded Block Groups: ");
+    System.out.println("\tLow redundancy block groups: " +
+        ecBlockGroupsStats.getLowRedundancyBlockGroupsStat());
+    System.out.println("\tBlock groups with corrupt internal blocks: " +
+        ecBlockGroupsStats.getCorruptBlockGroupsStat());
+    System.out.println("\tMissing block groups: " +
+        ecBlockGroupsStats.getMissingBlockGroupsStat());
+    System.out.println("\tPending deletion block groups: " +
+        ecBlockGroupsStats.getPendingDeletionBlockGroupsStat());
 
     System.out.println();
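
With this change the block counters are grouped under two headings instead of one flat list. An illustrative fragment of the resulting "hdfs dfsadmin -report" output (the counts below are made-up values; only the labels come from the code above):

    Replicated Blocks:
            Under replicated blocks: 0
            Blocks with corrupt replicas: 1
            Missing blocks: 0
            Missing blocks (with replication factor 1): 0
            Pending deletion blocks: 0
    Erasure Coded Block Groups:
            Low redundancy block groups: 0
            Block groups with corrupt internal blocks: 1
            Missing block groups: 0
            Pending deletion block groups: 0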
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b89ffcff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index ed43dc0..2d38f2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -38,11 +38,18 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -498,24 +505,47 @@ public class TestDFSAdmin {
     return sb.toString();
   }
 
-  @Test(timeout = 120000)
+  // get block details and check if the block is corrupt
+  private void waitForCorruptBlock(MiniDFSCluster miniCluster,
+      DFSClient client, Path file)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LocatedBlocks blocks = null;
+        try {
+          miniCluster.triggerBlockReports();
+          blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
+              Long.MAX_VALUE);
+        } catch (IOException e) {
+          return false;
+        }
+        return blocks != null && blocks.get(0).isCorrupt();
+      }
+    }, 1000, 60000);
+  }
+
+  @Test(timeout = 180000)
   public void testReportCommand() throws Exception {
+    tearDown();
     redirectStream();
 
-    /* init conf */
+    // init conf
     final Configuration dfsConf = new HdfsConfiguration();
+    ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
+    dfsConf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        ecPolicy.getName());
     dfsConf.setInt(
-        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-        500); // 0.5s
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     final Path baseDir = new Path(
         PathUtils.getTestDir(getClass()).getAbsolutePath(),
         GenericTestUtils.getMethodName());
     dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
+    final int numDn =
+        ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
 
-    final int numDn = 3;
-
-    /* init cluster */
     try(MiniDFSCluster miniCluster = new MiniDFSCluster
         .Builder(dfsConf)
         .numDataNodes(numDn).build()) {
@@ -523,34 +553,71 @@ public class TestDFSAdmin {
       miniCluster.waitActive();
       assertEquals(numDn, miniCluster.getDataNodes().size());
 
-      /* local vars */
       final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
       final DFSClient client = miniCluster.getFileSystem().getClient();
 
-      /* run and verify report command */
+      // Verify report command for all counts to be zero
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
 
-      /* shut down one DN */
-      final List<DataNode> datanodes = miniCluster.getDataNodes();
-      final DataNode last = datanodes.get(datanodes.size() - 1);
-      last.shutdown();
-      miniCluster.setDataNodeDead(last.getDatanodeId());
-
-      /* run and verify report command */
-      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, client);
-
-      /* corrupt one block */
       final short replFactor = 1;
       final long fileLength = 512L;
-      final FileSystem fs = miniCluster.getFileSystem();
+      final DistributedFileSystem fs = miniCluster.getFileSystem();
       final Path file = new Path(baseDir, "/corrupted");
       DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
       DFSTestUtil.waitReplication(fs, file, replFactor);
-
       final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
+      LocatedBlocks lbs = miniCluster.getFileSystem().getClient().
+          getNamenode().getBlockLocations(
+          file.toString(), 0, fileLength);
+      assertTrue("Unexpected block type: " + lbs.get(0),
+          lbs.get(0) instanceof LocatedBlock);
+      LocatedBlock locatedBlock = lbs.get(0);
+      DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
+      LOG.info("Replica block located on: " + locatedDataNode);
+
+      Path ecDir = new Path(baseDir, "ec");
+      fs.mkdirs(ecDir);
+      fs.getClient().setErasureCodingPolicy(ecDir.toString(),
+          ecPolicy.getName());
+      Path ecFile = new Path(ecDir, "ec-file");
+      int stripesPerBlock = 2;
+      int cellSize = ecPolicy.getCellSize();
+      int blockSize = stripesPerBlock * cellSize;
+      int blockGroupSize =  ecPolicy.getNumDataUnits() * blockSize;
+      int totalBlockGroups = 1;
+      DFSTestUtil.createStripedFile(miniCluster, ecFile, ecDir,
+          totalBlockGroups, stripesPerBlock, false, ecPolicy);
+
+      // Verify report command for all counts to be zero
+      resetStream();
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
+
+      // Choose a DataNode to shutdown
+      final List<DataNode> datanodes = miniCluster.getDataNodes();
+      DataNode dataNodeToShutdown = null;
+      for (DataNode dn : datanodes) {
+        if (!dn.getDatanodeId().getDatanodeUuid().equals(
+            locatedDataNode.getDatanodeUuid())) {
+          dataNodeToShutdown = dn;
+          break;
+        }
+      }
+      assertTrue("Unable to choose a DataNode to shutdown!",
+          dataNodeToShutdown != null);
+
+      // Shut down the DataNode not hosting the replicated block
+      LOG.info("Shutting down: " + dataNodeToShutdown);
+      dataNodeToShutdown.shutdown();
+      miniCluster.setDataNodeDead(dataNodeToShutdown.getDatanodeId());
+
+      // Verify report command to show dead DataNode
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client);
+
+      // Corrupt the replicated block
       final int blockFilesCorrupted = miniCluster
           .corruptBlockOnDataNodes(block);
       assertEquals("Fail to corrupt all replicas for block " + block,
@@ -564,35 +631,44 @@ public class TestDFSAdmin {
         // expected exception reading corrupt blocks
       }
 
-      /*
-       * Increase replication factor, this should invoke transfer request.
-       * Receiving datanode fails on checksum and reports it to namenode
-       */
+      // Increase replication factor, this should invoke transfer request.
+      // Receiving datanode fails on checksum and reports it to namenode
       fs.setReplication(file, (short) (replFactor + 1));
 
-      /* get block details and check if the block is corrupt */
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          LocatedBlocks blocks = null;
-          try {
-            miniCluster.triggerBlockReports();
-            blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
-                Long.MAX_VALUE);
-          } catch (IOException e) {
-            return false;
-          }
-          return blocks != null && blocks.get(0).isCorrupt();
-        }
-      }, 1000, 60000);
-
+      // get block details and check if the block is corrupt
       BlockManagerTestUtil.updateState(
           miniCluster.getNameNode().getNamesystem().getBlockManager());
+      waitForCorruptBlock(miniCluster, client, file);
+
+      // verify report command for corrupt replicated block
+      resetStream();
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client);
+
+      lbs = miniCluster.getFileSystem().getClient().
+          getNamenode().getBlockLocations(
+          ecFile.toString(), 0, blockGroupSize);
+      assertTrue("Unexpected block type: " + lbs.get(0),
+          lbs.get(0) instanceof LocatedStripedBlock);
+      LocatedStripedBlock bg =
+          (LocatedStripedBlock)(lbs.get(0));
+
+      miniCluster.getNamesystem().writeLock();
+      try {
+        BlockManager bm = miniCluster.getNamesystem().getBlockManager();
+        bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
+            "STORAGE_ID", "TEST");
+        BlockManagerTestUtil.updateState(bm);
+      } finally {
+        miniCluster.getNamesystem().writeUnlock();
+      }
+      waitForCorruptBlock(miniCluster, client, file);
 
-      /* run and verify report command */
+      // verify report command for corrupt replicated block
+      // and EC block group
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client);
     }
   }
 
@@ -669,6 +745,7 @@ public class TestDFSAdmin {
       final int numDn,
       final int numLiveDn,
       final int numCorruptBlocks,
+      final int numCorruptECBlockGroups,
       final DFSClient client) throws IOException {
 
     /* init vars */
@@ -679,11 +756,15 @@ public class TestDFSAdmin {
     final String expectedCorruptedBlocksStr = String.format(
         "Blocks with corrupt replicas: %d",
         numCorruptBlocks);
+    final String expectedCorruptedECBlockGroupsStr = String.format(
+        "Block groups with corrupt internal blocks: %d",
+        numCorruptECBlockGroups);
 
-    /* verify nodes and corrupt blocks */
+    // verify nodes and corrupt blocks
     assertThat(outStr, is(allOf(
         containsString(expectedLiveNodesStr),
-        containsString(expectedCorruptedBlocksStr))));
+        containsString(expectedCorruptedBlocksStr),
+        containsString(expectedCorruptedECBlockGroupsStr))));
 
     assertEquals(
         numDn,
@@ -694,7 +775,12 @@ public class TestDFSAdmin {
     assertEquals(
         numDn - numLiveDn,
         client.getDatanodeStorageReport(DatanodeReportType.DEAD).length);
-    assertEquals(numCorruptBlocks, client.getCorruptBlocksCount());
+    assertEquals(numCorruptBlocks + numCorruptECBlockGroups,
+        client.getCorruptBlocksCount());
+    assertEquals(numCorruptBlocks, client.getNamenode()
+        .getBlocksStats().getCorruptBlocksStat());
+    assertEquals(numCorruptECBlockGroups, client.getNamenode()
+        .getECBlockGroupsStats().getCorruptBlockGroupsStat());
   }
 
   @Test




[43/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9538801d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9538801d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9538801d

Branch: refs/heads/HDFS-10285
Commit: 9538801d44cd0bda2dfec599d92d882e2f2c14b8
Parents: 67161da
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:14 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java    | 102 ++++++++++++++++++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 ++++++++++++++++
 4 files changed, 224 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538801d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c832d95..2a26c35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4228,6 +4228,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Check whether the file has any low redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+    boolean result = false;
+    for (BlockInfo block : bc.getBlocks()) {
+      short expected = getExpectedRedundancyNum(block);
+      final NumberReplicas n = countNodes(block);
+      if (expected > n.liveReplicas()) {
+        result = true;
+      }
+    }
+    return result;
+  }
+
+  /**
    * Check sufficient redundancy of the blocks in the collection. If any block
    * is needed reconstruction, insert it into the reconstruction queue.
    * Otherwise, if the block is more than the expected replication factor,

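Condensed, the new check above walks every block in the collection and reports the collection as low redundant when any block has fewer live replicas than its expected redundancy. A simplified sketch of the same logic (simplified with an early return; the committed method scans all blocks and returns an accumulated flag):

    public boolean hasLowRedundancyBlocks(BlockCollection bc) {
      for (BlockInfo block : bc.getBlocks()) {
        // Low redundant: fewer live replicas than the expected redundancy.
        if (countNodes(block).liveReplicas() < getExpectedRedundancyNum(block)) {
          return true;
        }
      }
      return false;
    }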
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538801d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
     // Represents that, the analysis skipped due to some conditions.
     // Example conditions are if no blocks really exists in block collection or
     // if analysis is not required on ec files with unsuitable storage policies
-    BLOCKS_TARGET_PAIRING_SKIPPED;
+    BLOCKS_TARGET_PAIRING_SKIPPED,
+    // Represents that all the reported blocks satisfy the policy, but
+    // some of the blocks are low redundant.
+    FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
               case FEW_BLOCKS_TARGETS_PAIRED:
                 this.storageMovementsMonitor.add(blockCollectionID, false);
                 break;
+              case FEW_LOW_REDUNDANCY_BLOCKS:
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug("Adding trackID " + blockCollectionID
+                      + " back to retry queue as some of the blocks"
+                      + " are low redundant.");
+                }
+                this.storageMovementNeeded.add(blockCollectionID);
+                break;
               // Just clean Xattrs
               case BLOCKS_TARGET_PAIRING_SKIPPED:
               case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
         boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
             blockInfo, expectedStorageTypes, existing, storages);
         if (computeStatus
-            && status != BlocksMovingAnalysisStatus.FEW_BLOCKS_TARGETS_PAIRED) {
+            && status != BlocksMovingAnalysisStatus.FEW_BLOCKS_TARGETS_PAIRED
+            && !blockManager.hasLowRedundancyBlocks(blockCollection)) {
           status = BlocksMovingAnalysisStatus.ALL_BLOCKS_TARGETS_PAIRED;
         } else {
           status = BlocksMovingAnalysisStatus.FEW_BLOCKS_TARGETS_PAIRED;
         }
+      } else {
+        if (blockManager.hasLowRedundancyBlocks(blockCollection)) {
+          status = BlocksMovingAnalysisStatus.FEW_LOW_REDUNDANCY_BLOCKS;
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538801d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f1a4169..7127895 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 
 import java.io.FileNotFoundException;
@@ -29,6 +30,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
@@ -55,6 +58,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -582,7 +586,9 @@ public class TestStoragePolicySatisfier {
       Assert.assertTrue("SPS should be running as "
           + "no Mover really running", running);
     } finally {
-      hdfsCluster.shutdown();
+      if (hdfsCluster != null) {
+        hdfsCluster.shutdown();
+      }
     }
   }
 
@@ -983,6 +989,100 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Test SPS for low redundant file blocks.
+   * 1. Create cluster with 3 datanodes.
+   * 2. Create one file with 3 replicas.
+   * 3. Set policy and call satisfyStoragePolicy for file.
+   * 4. Stop NameNode and Datanodes.
+   * 5. Start NameNode with 2 datanodes and wait for block movement.
+   * 6. Start third datanode.
+   * 7. Third Datanode's replica should also be moved to the proper
+   * storage based on policy.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      StorageType[][] newtypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+          .storageTypes(newtypes).build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0);
+      fs.setStoragePolicy(filePath, "COLD");
+      List<DataNodeProperties> list = new ArrayList<>();
+      list.add(cluster.stopDataNode(0));
+      list.add(cluster.stopDataNode(0));
+      list.add(cluster.stopDataNode(0));
+      cluster.restartNameNodes();
+      cluster.restartDataNode(list.get(0), true);
+      cluster.restartDataNode(list.get(1), true);
+      cluster.waitActive();
+      fs.satisfyStoragePolicy(filePath);
+      Thread.sleep(3000 * 6);
+      cluster.restartDataNode(list.get(2), true);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test SPS for extra redundant file blocks.
+   * 1. Create cluster with 5 datanodes.
+   * 2. Create one file with 5 replicas.
+   * 3. Set file replication to 3.
+   * 4. Set policy and call satisfyStoragePolicy for file.
+   * 5. Block should be moved successfully.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      StorageType[][] newtypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5)
+          .storageTypes(newtypes).build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
+      fs.setReplication(filePath, (short) 3);
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(BlockStorageMovementAttemptedItems.class));
+      fs.setStoragePolicy(filePath, "COLD");
+      fs.satisfyStoragePolicy(filePath);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
+      assertFalse("Log output does not contain expected log message: ",
+          logs.getOutput().contains("some of the blocks are low redundant"));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538801d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index eb4a6a3..195c9e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -27,8 +29,10 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
@@ -293,6 +297,92 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
 
   /**
+   * Test SPS for low redundant file blocks.
+   * 1. Create cluster with 10 datanodes.
+   * 2. Create one striped file with default EC Policy.
+   * 3. Set policy and call satisfyStoragePolicy for file.
+   * 4. Stop NameNode and Datanodes.
+   * 5. Start NameNode with 5 datanodes and wait for block movement.
+   * 6. Start remaining 5 datanodes.
+   * 7. All replicas should be moved to the proper storage based on policy.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
+    // start 10 datanodes
+    int numOfDatanodes = 10;
+    int storagesPerDatanode = 2;
+    long capacity = 20 * defaultStripeBlockSize;
+    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+    for (int i = 0; i < numOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys
+        .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+        "3000");
+    initConfWithStripe(conf, defaultStripeBlockSize);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numOfDatanodes)
+        .storagesPerDatanode(storagesPerDatanode)
+        .storageTypes(new StorageType[][]{
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE}})
+        .storageCapacities(capacities)
+        .build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path barDir = new Path("/bar");
+      fs.mkdirs(barDir);
+      // set an EC policy on "/bar" directory
+      fs.setErasureCodingPolicy(barDir, null);
+
+      // write file to barDir
+      final Path fooFile = new Path("/bar/foo");
+      long fileLen = cellSize * dataBlocks;
+      DFSTestUtil.createFile(cluster.getFileSystem(), fooFile,
+          fileLen, (short) 3, 0);
+
+      // Move file to ARCHIVE.
+      fs.setStoragePolicy(barDir, "COLD");
+      //Stop DataNodes and restart namenode
+      List<DataNodeProperties> list = new ArrayList<>(numOfDatanodes);
+      for (int i = 0; i < numOfDatanodes; i++) {
+        list.add(cluster.stopDataNode(0));
+      }
+      cluster.restartNameNodes();
+      // Restart half datanodes
+      for (int i = 0; i < numOfDatanodes / 2; i++) {
+        cluster.restartDataNode(list.get(i), true);
+      }
+      cluster.waitActive();
+      fs.satisfyStoragePolicy(fooFile);
+      Thread.sleep(3000 * 6);
+      // Start the remaining datanodes
+      for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) {
+        cluster.restartDataNode(list.get(i), true);
+      }
+      // verify storage types and locations.
+      waitExpectedStorageType(cluster, fooFile.toString(), fileLen,
+          StorageType.ARCHIVE, 9, 9, 60000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+
+  /**
    * Tests to verify that for the given path, no blocks under the given path
    * will be scheduled for block movement as there are no available datanode
    * with required storage type.




[29/50] [abbrv] hadoop git commit: HDFS-11186. [SPS]: Daemon thread of SPS should start only in Active NN. Contributed by Wei Zhou

Posted by ra...@apache.org.
HDFS-11186. [SPS]: Daemon thread of SPS should start only in Active NN. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b775bdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b775bdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b775bdb

Branch: refs/heads/HDFS-10285
Commit: 8b775bdb7759a0cb035762429812246027ed1ae0
Parents: 498e4de
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Thu Jan 12 09:01:30 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:05 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   9 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  11 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   6 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |   7 ++
 .../TestStoragePolicySatisfierWithHA.java       | 109 +++++++++++++++++++
 5 files changed, 138 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b775bdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fed1506..f053265 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,9 +427,15 @@ public class BlockManager implements BlockStatsMXBean {
    */
   private final short minReplicationToBeInMaintenance;
 
+  /**
+   * Whether HA is enabled.
+   */
+  private final boolean haEnabled;
+
   public BlockManager(final Namesystem namesystem, boolean haEnabled,
       final Configuration conf) throws IOException {
     this.namesystem = namesystem;
+    this.haEnabled = haEnabled;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
     this.blockIdManager = new BlockIdManager(this);
@@ -694,7 +700,7 @@ public class BlockManager implements BlockStatsMXBean {
     this.blockReportThread.start();
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
     bmSafeMode.activate(blockTotal);
-    if (sps != null) {
+    if (sps != null && !haEnabled) {
       sps.start();
     }
   }
@@ -4898,6 +4904,7 @@ public class BlockManager implements BlockStatsMXBean {
       LOG.info("Storage policy satisfier is already running.");
       return;
     }
+
     sps.start();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b775bdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index dd6f90e..d24b336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
@@ -661,9 +662,13 @@ public class Mover {
           try {
             spsRunning = nnc.getDistributedFileSystem().getClient()
                 .isStoragePolicySatisfierRunning();
-          } catch (StandbyException e) {
-            System.err.println("Skip Standby Namenode. " + nnc.toString());
-            continue;
+          } catch (RemoteException e) {
+            IOException cause = e.unwrapRemoteException();
+            if (cause instanceof StandbyException) {
+              System.err.println("Skip Standby Namenode. " + nnc.toString());
+              continue;
+            }
+            throw e;
           }
           if (spsRunning) {
             System.err.println("Mover failed due to StoragePolicySatisfier"

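The reworked catch reflects that, on this call path, the standby NameNode's StandbyException reaches the Mover wrapped inside a RemoteException, so it has to be unwrapped before it can be matched. A minimal sketch of the idiom (names as in the hunk above; the snippet sits inside the Mover's loop over NameNodes, hence the continue):

    try {
      spsRunning = nnc.getDistributedFileSystem().getClient()
          .isStoragePolicySatisfierRunning();
    } catch (RemoteException e) {
      // Unwrap the transported server-side exception before deciding how to react.
      if (e.unwrapRemoteException() instanceof StandbyException) {
        System.err.println("Skip Standby Namenode. " + nnc.toString());
        continue;    // skip the standby NameNode and keep going
      }
      throw e;       // anything else is a real failure
    }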
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b775bdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a5f5eaa..3cb47b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1274,6 +1274,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir,
             edekCacheLoaderDelay, edekCacheLoaderInterval);
       }
+
+      blockManager.activateSPS();
     } finally {
       startingActiveService = false;
       blockManager.checkSafeMode();
@@ -1303,6 +1305,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     LOG.info("Stopping services started for active state");
     writeLock();
     try {
+      if (blockManager != null) {
+        blockManager.deactivateSPS();
+      }
+
       stopSecretManager();
       leaseManager.stopMonitor();
       if (nnrmthread != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b775bdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 27c07e2..7cc46f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -2132,6 +2132,13 @@ public class NameNode extends ReconfigurableBase implements
                   + "we must pass true/false only"));
     }
 
+    if (!isActiveState()) {
+      throw new ReconfigurationException(property, newVal,
+          getConf().get(property), new HadoopIllegalArgumentException(
+          "Activating or deactivating storage policy satisfier service on "
+              + state + " NameNode is not allowed"));
+    }
+
     boolean activateSPS = Boolean.parseBoolean(newVal);
     if (activateSPS) {
       namesystem.getBlockManager().activateSPS();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b775bdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
new file mode 100644
index 0000000..4d226ff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Tests that StoragePolicySatisfier is able to work with HA enabled.
+ */
+public class TestStoragePolicySatisfierWithHA {
+  private MiniDFSCluster cluster = null;
+
+  @Before
+  public void setUp() throws IOException {
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(1)
+        .build();
+  }
+
+  /**
+   * Tests to verify that SPS should run/stop automatically when NN state
+   * changes between Standby and Active.
+   */
+  @Test(timeout = 100000)
+  public void testWhenNNHAStateChanges() throws IOException {
+    try {
+      DistributedFileSystem fs;
+      boolean running;
+
+      cluster.waitActive();
+      fs = cluster.getFileSystem(0);
+
+      try {
+        fs.getClient().isStoragePolicySatisfierRunning();
+        Assert.fail("Call this function to Standby NN should "
+            + "raise an exception.");
+      } catch (RemoteException e) {
+        IOException cause = e.unwrapRemoteException();
+        if (!(cause instanceof StandbyException)) {
+          Assert.fail("Unexpected exception happened " + e);
+        }
+      }
+
+      cluster.transitionToActive(0);
+      running = fs.getClient().isStoragePolicySatisfierRunning();
+      Assert.assertTrue("StoragePolicySatisfier should be active "
+          + "when NN transits from Standby to Active mode.", running);
+
+      // NN transits from Active to Standby
+      cluster.transitionToStandby(0);
+      try {
+        fs.getClient().isStoragePolicySatisfierRunning();
+        Assert.fail("NN in Standby again, call this function should "
+            + "raise an exception.");
+      } catch (RemoteException e) {
+        IOException cause = e.unwrapRemoteException();
+        if (!(cause instanceof StandbyException)) {
+          Assert.fail("Unexpected exception happened " + e);
+        }
+      }
+
+      try {
+        cluster.getNameNode(0).reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+        Assert.fail("It's not allowed to activate or deactivate"
+            + " StoragePolicySatisfier on Standby NameNode");
+      } catch (ReconfigurationException e) {
+        GenericTestUtils.assertExceptionContains("Could not change property "
+            + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY
+            + " from 'true' to 'false'", e);
+        GenericTestUtils.assertExceptionContains(
+            "Activating or deactivating storage policy satisfier service on "
+                + "standby NameNode is not allowed", e.getCause());
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
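For readers skimming the assertions above: the unwrap-and-check idiom this test leans on can be written in isolation roughly as below. This is only an illustrative sketch; the helper class and its name are not part of the patch, and the only assumed APIs are RemoteException#unwrapRemoteException and StandbyException, both already used in the test.

    import java.io.IOException;
    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.ipc.StandbyException;

    /** Illustrative helper mirroring the test's standby-rejection check. */
    final class StandbyChecks {
      private StandbyChecks() {
      }

      /** Returns true if the remote failure was a Standby NN rejecting the call. */
      static boolean isStandbyRejection(RemoteException e) {
        IOException cause = e.unwrapRemoteException();
        return cause instanceof StandbyException;
      }
    }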




[42/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67161da7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67161da7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67161da7

Branch: refs/heads/HDFS-10285
Commit: 67161da751db4c9231a2cc07dc3591618c41ce5a
Parents: 8cc681d
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:09 2017 +0530

----------------------------------------------------------------------
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 92 +++++++++++++++++++-
 .../src/site/markdown/ArchivalStorage.md        | 21 +++++
 .../src/site/markdown/HDFSCommands.md           |  2 +
 .../hdfs/tools/TestStoragePolicyCommands.java   | 43 ++++++++-
 4 files changed, 156 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 9c7d048..30420ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Joiner;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -230,6 +232,92 @@ public class StoragePolicyAdmin extends Configured implements Tool {
     }
   }
 
+  /** Command to schedule blocks to move based on specified policy. */
+  private static class SatisfyStoragePolicyCommand implements
+      AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-satisfyStoragePolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -path <path>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<path>", "The path of the file/directory to satisfy"
+          + " storage policy");
+      return getShortUsage() + "\n" +
+          "Schedule blocks to move based on file/directory policy.\n\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      if (path == null) {
+        System.err.println("Please specify the path for setting the storage " +
+            "policy.\nUsage: " + getLongUsage());
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        dfs.satisfyStoragePolicy(new Path(path));
+        System.out.println("Scheduled blocks to move based on the current"
+            + " storage policy on " + path);
+      } catch (Exception e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to check storage policy satisfier status. */
+  private static class IsSPSRunningCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-isSPSRunning";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + "]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      return getShortUsage() + "\n" +
+          "Check the status of Storage Policy Statisfier.\n\n";
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      if (!args.isEmpty()) {
+        System.err.print("Can't understand arguments: "
+            + Joiner.on(" ").join(args) + "\n");
+        System.err.println("Usage is " + getLongUsage());
+        return 1;
+      }
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        if (dfs.getClient().isStoragePolicySatisfierRunning()) {
+          System.out.println("yes");
+        } else {
+          System.out.println("no");
+        }
+      } catch (Exception e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
   /* Command to unset the storage policy set for a file/directory */
   private static class UnsetStoragePolicyCommand
       implements AdminHelper.Command {
@@ -280,6 +368,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       new ListStoragePoliciesCommand(),
       new SetStoragePolicyCommand(),
       new GetStoragePolicyCommand(),
-      new UnsetStoragePolicyCommand()
+      new UnsetStoragePolicyCommand(),
+      new SatisfyStoragePolicyCommand(),
+      new IsSPSRunningCommand()
   };
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..f1895fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -169,5 +169,26 @@ Get the storage policy of a file or a directory.
 |:---- |:---- |
 | `-path <path>` | The path referring to either a directory or a file. |
 
+### Satisfy Storage Policy
+
+Schedule blocks to move based on the file/directory policy. This command is applicable only to the given path and its immediate children; sub-directories won't be considered for satisfying the policy.
+
+* Command:
+
+        hdfs storagepolicies -satisfyStoragePolicy -path <path>
+
+* Arguments:
+
+| | |
+|:---- |:---- |
+| `-path <path>` | The path referring to either a directory or a file. |
+
+### SPS Running Status
+
+Check the running status of the Storage Policy Satisfier in the NameNode. If it is running, the command prints 'yes'; otherwise it prints 'no'.
+
+* Command:
+
+        hdfs storagepolicies -isSPSRunning
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 5903a36..f5ad187 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -576,6 +576,8 @@ Usage:
           [-setStoragePolicy -path <path> -policy <policy>]
           [-getStoragePolicy -path <path>]
           [-unsetStoragePolicy -path <path>]
+          [-satisfyStoragePolicy -path <path>]
+          [-isSPSRunning]
           [-help <command-name>]
 
 Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 149dabb..c86eecd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -21,6 +21,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -45,7 +47,10 @@ public class TestStoragePolicyCommands {
   @Before
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
+    StorageType[][] newtypes = new StorageType[][] {
+        {StorageType.ARCHIVE, StorageType.DISK}};
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
+        .storageTypes(newtypes).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
   }
@@ -157,4 +162,40 @@ public class TestStoragePolicyCommands {
     DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
         "File/Directory does not exist: /fooz");
   }
+
+  @Test
+  public void testStoragePolicySatisfierCommand() throws Exception {
+    final String file = "/testStoragePolicySatisfierCommand";
+    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
+        "The storage policy of " + file + " is unspecified");
+
+    DFSTestUtil.toolRun(admin,
+        "-setStoragePolicy -path " + file + " -policy COLD", 0,
+        "Set storage policy COLD on " + file.toString());
+
+    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
+        "Scheduled blocks to move based on the current storage policy on "
+            + file.toString());
+
+    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+        fs);
+  }
+
+  @Test
+  public void testIsSPSRunningCommand() throws Exception {
+    final String file = "/testIsSPSRunningCommand";
+    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+    DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
+    cluster.getNameNode().reconfigureProperty(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+    cluster.waitActive();
+    DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
+    // Test with unnecessary args
+    DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
+        "Can't understand arguments: ");
+  }
 }
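To tie the pieces of this patch together: the CLI documented in ArchivalStorage.md ultimately drives DistributedFileSystem, as the StoragePolicyAdmin diff above shows. A minimal programmatic sketch of the same workflow follows; the path, the COLD policy choice and the cluster setup are assumptions for illustration, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SatisfyPolicyExample {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster with ARCHIVE storage configured.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        Path dir = new Path("/archive/logs");  // hypothetical path
        dfs.setStoragePolicy(dir, "COLD");     // equivalent of -setStoragePolicy
        dfs.satisfyStoragePolicy(dir);         // equivalent of -satisfyStoragePolicy
        // Equivalent of -isSPSRunning
        System.out.println(
            dfs.getClient().isStoragePolicySatisfierRunning() ? "yes" : "no");
      }
    }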




[34/50] [abbrv] hadoop git commit: HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down time. Contributed by Wei Zhou and Rakesh R

Posted by ra...@apache.org.
HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down time. Contributed by Wei Zhou and Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2dac125e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2dac125e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2dac125e

Branch: refs/heads/HDFS-10285
Commit: 2dac125e41c193bfe82c0a35a41e1313557f215d
Parents: 4bb9b40
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Tue Apr 11 14:25:01 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:30 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 13 +++++--
 .../BlockStorageMovementAttemptedItems.java     | 25 +++++++++----
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 .../server/namenode/StoragePolicySatisfier.java | 38 ++++++++++++++------
 .../TestBlockStorageMovementAttemptedItems.java |  3 +-
 5 files changed, 60 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dac125e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1a809a9..c832d95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -707,7 +707,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void close() {
     if (sps != null) {
-      sps.stop(false);
+      sps.deactivate(false);
     }
     bmSafeMode.close();
     try {
@@ -722,6 +722,7 @@ public class BlockManager implements BlockStatsMXBean {
     datanodeManager.close();
     pendingReconstruction.stop();
     blocksMap.close();
+    stopSPSGracefully();
   }
 
   /** @return the datanodeManager */
@@ -4919,10 +4920,18 @@ public class BlockManager implements BlockStatsMXBean {
       LOG.info("Storage policy satisfier is already stopped.");
       return;
     }
-    sps.stop(true);
+    sps.deactivate(true);
   }
 
   /**
+   * Timed wait to stop storage policy satisfier daemon threads.
+   */
+  public void stopSPSGracefully() {
+    if (sps != null) {
+      sps.stopGracefully();
+    }
+  }
+  /**
    * @return True if storage policy satisfier running.
    */
   public boolean isStoragePolicySatisfierRunning() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dac125e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f15db73..26b98d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -130,21 +130,34 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   /**
-   * Stops the monitor thread.
+   * Sets the running flag to false. Also interrupts the monitor thread and
+   * clears all the queued-up tasks.
    */
-  public synchronized void stop() {
+  public synchronized void deactivate() {
     monitorRunning = false;
     if (timerThread != null) {
       timerThread.interrupt();
-      try {
-        timerThread.join(3000);
-      } catch (InterruptedException ie) {
-      }
     }
     this.clearQueues();
   }
 
   /**
+   * Timed wait to stop monitor thread.
+   */
+  synchronized void stopGracefully() {
+    if (timerThread == null) {
+      return;
+    }
+    if (monitorRunning) {
+      deactivate();
+    }
+    try {
+      timerThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+  }
+
+  /**
    * This class contains information of an attempted trackID. Information such
    * as, (a)last attempted time stamp, (b)whether all the blocks in the trackID
    * were attempted and blocks movement has been scheduled to satisfy storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dac125e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b3663a1..3f2a6f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1308,7 +1308,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (blockManager != null) {
         blockManager.deactivateSPS();
       }
-
       stopSecretManager();
       leaseManager.stopMonitor();
       if (nnrmthread != null) {
@@ -1347,6 +1346,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         // Don't want to keep replication queues when not in Active.
         blockManager.clearQueues();
         blockManager.setInitializedReplQueues(false);
+        blockManager.stopSPSGracefully();
       }
     } finally {
       writeUnlock("stopActiveServices");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dac125e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 337d5b5..8cf9920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -115,22 +115,21 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   /**
-   * Stop storage policy satisfier demon thread.
+   * Deactivates storage policy satisfier by stopping its services.
    *
-   * @param reconfigStop
+   * @param reconfig
+   *          true represents deactivating SPS service as requested by admin,
+   *          false otherwise
    */
-  public synchronized void stop(boolean reconfigStop) {
+  public synchronized void deactivate(boolean reconfig) {
     isRunning = false;
     if (storagePolicySatisfierThread == null) {
       return;
     }
+
     storagePolicySatisfierThread.interrupt();
-    try {
-      storagePolicySatisfierThread.join(3000);
-    } catch (InterruptedException ie) {
-    }
-    this.storageMovementsMonitor.stop();
-    if (reconfigStop) {
+    this.storageMovementsMonitor.deactivate();
+    if (reconfig) {
       LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
           + "deactivate it.");
       this.clearQueuesWithNotification();
@@ -141,6 +140,23 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   /**
+   * Timed wait to stop storage policy satisfier daemon threads.
+   */
+  public synchronized void stopGracefully() {
+    if (isRunning) {
+      deactivate(true);
+    }
+    this.storageMovementsMonitor.stopGracefully();
+    if (storagePolicySatisfierThread == null) {
+      return;
+    }
+    try {
+      storagePolicySatisfierThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+  }
+
+  /**
    * Check whether StoragePolicySatisfier is running.
    * @return true if running
    */
@@ -162,7 +178,7 @@ public class StoragePolicySatisfier implements Runnable {
       if (!isRunning) {
         // Stopping monitor thread and clearing queues as well
         this.clearQueues();
-        this.storageMovementsMonitor.stop();
+        this.storageMovementsMonitor.stopGracefully();
         LOG.error(
             "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
                 + HdfsServerConstants.MOVER_ID_PATH.toString()
@@ -194,7 +210,7 @@ public class StoragePolicySatisfier implements Runnable {
           isRunning = false;
           // Stopping monitor thread and clearing queues as well
           this.clearQueues();
-          this.storageMovementsMonitor.stop();
+          this.storageMovementsMonitor.stopGracefully();
         }
         if (!namesystem.isRunning()) {
           LOG.info("Stopping StoragePolicySatisfier.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dac125e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
index 95142d3..8c7d982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -47,7 +47,8 @@ public class TestBlockStorageMovementAttemptedItems {
   @After
   public void teardown() {
     if (bsmAttemptedItems != null) {
-      bsmAttemptedItems.stop();
+      bsmAttemptedItems.deactivate();
+      bsmAttemptedItems.stopGracefully();
     }
   }
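The deactivate()/stopGracefully() split that runs through this patch is a two-phase shutdown: a fast, non-blocking signal first, and a bounded join only where the caller can afford to wait. A self-contained sketch of the pattern follows; the class and field names are illustrative, not the patch's own.

    /** Illustrative two-phase shutdown: non-blocking deactivate plus bounded graceful stop. */
    class MonitorService implements Runnable {
      private volatile boolean running = true;
      private Thread worker;

      void start() {
        worker = new Thread(this, "monitor");
        worker.start();
      }

      /** Phase 1: flip the flag and interrupt; never blocks the caller. */
      synchronized void deactivate() {
        running = false;
        if (worker != null) {
          worker.interrupt();
        }
      }

      /** Phase 2: timed wait so shutdown cannot hang on a stuck worker. */
      synchronized void stopGracefully() {
        if (running) {
          deactivate();
        }
        if (worker == null) {
          return;
        }
        try {
          worker.join(3000);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }

      @Override
      public void run() {
        while (running) {
          try {
            Thread.sleep(1000); // placeholder for periodic monitoring work
          } catch (InterruptedException ie) {
            // interrupted by deactivate(); the loop exits once running is false
          }
        }
      }
    }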
 




[50/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a2c50b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a2c50b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a2c50b0

Branch: refs/heads/HDFS-10285
Commit: 4a2c50b09880d3b06e96d0d21b63cff838b2b0fa
Parents: f570493
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 12:03:44 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java    |   1 +
 .../BlockStorageMovementAttemptedItems.java     |  95 +++++---
 .../namenode/BlockStorageMovementNeeded.java    | 233 ++++++++++++++++++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-----
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  11 +-
 .../hdfs/server/namenode/FSDirectory.java       |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++++++---
 .../TestStoragePolicySatisfyWorker.java         |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +++++++++
 .../namenode/TestStoragePolicySatisfier.java    | 127 +++++-----
 14 files changed, 589 insertions(+), 259 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3b185cd..419ca80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -421,9 +420,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-      new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
    * from ENTERING_MAINTENANCE to IN_MAINTENANCE.
    */
@@ -466,8 +462,7 @@ public class BlockManager implements BlockStatsMXBean {
         conf.getBoolean(
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-    sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-        conf);
+    sps = new StoragePolicySatisfier(namesystem, this, conf);
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
     this.maxCorruptFilesReturned = conf.getInt(
@@ -4869,20 +4864,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its blocks.
-   *
-   * @param id
-   *          - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-    storageMovementNeeded.add(id);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Added block collection id {} to block "
-          + "storageMovementNeeded queue", id);
-    }
-  }
-
-  /**
    * Gets the storage policy satisfier instance.
    *
    * @return sps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a298843..3504cb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1749,6 +1749,13 @@ public class DatanodeManager {
       }
     }
 
+    if (nodeinfo.shouldDropSPSWork()) {
+      cmds.add(DropSPSWorkCommand.DNA_DROP_SPS_WORK_COMMAND);
+      // Set back to false to indicate that the new value has been sent to the
+      // datanode.
+      nodeinfo.setDropSPSWork(false);
+    }
+
     // check pending block storage movement tasks
     BlockStorageMovementInfosBatch blkStorageMovementInfosBatch = nodeinfo
         .getBlocksToMoveStorages();
@@ -1760,13 +1767,6 @@ public class DatanodeManager {
           blkStorageMovementInfosBatch.getBlockMovingInfo()));
     }
 
-    if (nodeinfo.shouldDropSPSWork()) {
-      cmds.add(DropSPSWorkCommand.DNA_DROP_SPS_WORK_COMMAND);
-      // Set back to false to indicate that the new value has been sent to the
-      // datanode.
-      nodeinfo.setDropSPSWork(false);
-    }
-
     if (!cmds.isEmpty()) {
       return cmds.toArray(new DatanodeCommand[cmds.size()]);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 39ff4b9..d60fb6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -823,6 +823,7 @@ class BPOfferService {
     case DatanodeProtocol.DNA_UNCACHE:
     case DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION:
     case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
+    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
       LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
       break;
     default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 37833e2..278b62b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult.Status;
 import org.apache.hadoop.util.Daemon;
@@ -54,7 +55,7 @@ public class BlockStorageMovementAttemptedItems {
    * A map holds the items which are already taken for blocks movements
    * processing and sent to DNs.
    */
-  private final Map<Long, ItemInfo> storageMovementAttemptedItems;
+  private final Map<Long, AttemptedItemInfo> storageMovementAttemptedItems;
   private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
   private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
@@ -91,18 +92,19 @@ public class BlockStorageMovementAttemptedItems {
    * Add item to block storage movement attempted items map which holds the
    * tracking/blockCollection id versus time stamp.
    *
-   * @param blockCollectionID
-   *          - tracking id / block collection id
+   * @param itemInfo
+   *          - tracking info
    * @param allBlockLocsAttemptedToSatisfy
-   *          - failed to find matching target nodes to satisfy storage type for
-   *          all the block locations of the given blockCollectionID
+   *          - failed to find matching target nodes to satisfy storage type
+   *          for all the block locations of the given blockCollectionID
    */
-  public void add(Long blockCollectionID,
-      boolean allBlockLocsAttemptedToSatisfy) {
+  public void add(ItemInfo itemInfo, boolean allBlockLocsAttemptedToSatisfy) {
     synchronized (storageMovementAttemptedItems) {
-      ItemInfo itemInfo = new ItemInfo(monotonicNow(),
+      AttemptedItemInfo attemptedItemInfo = new AttemptedItemInfo(
+          itemInfo.getRootId(), itemInfo.getTrackId(), monotonicNow(),
           allBlockLocsAttemptedToSatisfy);
-      storageMovementAttemptedItems.put(blockCollectionID, itemInfo);
+      storageMovementAttemptedItems.put(itemInfo.getTrackId(),
+          attemptedItemInfo);
     }
   }
 
@@ -167,21 +169,27 @@ public class BlockStorageMovementAttemptedItems {
    * satisfy storage policy. This is used by
    * {@link BlockStorageMovementAttemptedItems#storageMovementAttemptedItems}.
    */
-  private final static class ItemInfo {
+  private final static class AttemptedItemInfo extends ItemInfo {
     private long lastAttemptedOrReportedTime;
     private final boolean allBlockLocsAttemptedToSatisfy;
 
     /**
-     * ItemInfo constructor.
+     * AttemptedItemInfo constructor.
      *
+     * @param rootId
+     *          rootId for trackId
+     * @param trackId
+     *          trackId for file.
      * @param lastAttemptedOrReportedTime
      *          last attempted or reported time
      * @param allBlockLocsAttemptedToSatisfy
      *          whether all the blocks in the trackID were attempted and blocks
      *          movement has been scheduled to satisfy storage policy
      */
-    private ItemInfo(long lastAttemptedOrReportedTime,
+    private AttemptedItemInfo(long rootId, long trackId,
+        long lastAttemptedOrReportedTime,
         boolean allBlockLocsAttemptedToSatisfy) {
+      super(rootId, trackId);
       this.lastAttemptedOrReportedTime = lastAttemptedOrReportedTime;
       this.allBlockLocsAttemptedToSatisfy = allBlockLocsAttemptedToSatisfy;
     }
@@ -211,6 +219,7 @@ public class BlockStorageMovementAttemptedItems {
     private void touchLastReportedTimeStamp() {
       this.lastAttemptedOrReportedTime = monotonicNow();
     }
+
   }
 
   /**
@@ -239,18 +248,20 @@ public class BlockStorageMovementAttemptedItems {
   @VisibleForTesting
   void blocksStorageMovementUnReportedItemsCheck() {
     synchronized (storageMovementAttemptedItems) {
-      Iterator<Entry<Long, ItemInfo>> iter = storageMovementAttemptedItems
-          .entrySet().iterator();
+      Iterator<Entry<Long, AttemptedItemInfo>> iter =
+          storageMovementAttemptedItems.entrySet().iterator();
       long now = monotonicNow();
       while (iter.hasNext()) {
-        Entry<Long, ItemInfo> entry = iter.next();
-        ItemInfo itemInfo = entry.getValue();
+        Entry<Long, AttemptedItemInfo> entry = iter.next();
+        AttemptedItemInfo itemInfo = entry.getValue();
         if (now > itemInfo.getLastAttemptedOrReportedTime()
             + selfRetryTimeout) {
           Long blockCollectionID = entry.getKey();
           synchronized (storageMovementAttemptedResults) {
             if (!isExistInResult(blockCollectionID)) {
-              blockStorageMovementNeeded.add(blockCollectionID);
+              ItemInfo candidate = new ItemInfo(
+                  itemInfo.getRootId(), blockCollectionID);
+              blockStorageMovementNeeded.add(candidate);
               iter.remove();
               LOG.info("TrackID: {} becomes timed out and moved to needed "
                   + "retries queue for next iteration.", blockCollectionID);
@@ -297,17 +308,30 @@ public class BlockStorageMovementAttemptedItems {
         synchronized (storageMovementAttemptedItems) {
           Status status = storageMovementAttemptedResult.getStatus();
           long trackId = storageMovementAttemptedResult.getTrackId();
-          ItemInfo itemInfo;
+          AttemptedItemInfo attemptedItemInfo = storageMovementAttemptedItems
+              .get(trackId);
+          // A null attemptedItemInfo means there is no root for this trackId,
+          // so use the trackId itself as the root and let
+          // blockStorageMovementNeeded#removeItemTrackInfo() clean up the
+          // xAttr.
+          ItemInfo itemInfo = new ItemInfo((attemptedItemInfo != null)
+              ? attemptedItemInfo.getRootId() : trackId, trackId);
           switch (status) {
           case FAILURE:
-            blockStorageMovementNeeded.add(trackId);
-            LOG.warn("Blocks storage movement results for the tracking id: {}"
-                + " is reported from co-ordinating datanode, but result"
-                + " status is FAILURE. So, added for retry", trackId);
+            if (attemptedItemInfo != null) {
+              blockStorageMovementNeeded.add(itemInfo);
+              LOG.warn("Blocks storage movement results for the tracking id:"
+                  + "{} is reported from co-ordinating datanode, but result"
+                  + " status is FAILURE. So, added for retry", trackId);
+            } else {
+              LOG.info("Blocks storage movement is FAILURE for the track"
+                  + " id {}. But the trackID doesn't exists in"
+                  + " storageMovementAttemptedItems list.", trackId);
+              blockStorageMovementNeeded
+                  .removeItemTrackInfo(itemInfo);
+            }
             break;
           case SUCCESS:
-            itemInfo = storageMovementAttemptedItems.get(trackId);
-
             // ItemInfo could be null. One case is, before the blocks movements
             // result arrives the attempted trackID became timed out and then
             // removed the trackID from the storageMovementAttemptedItems list.
@@ -318,33 +342,32 @@ public class BlockStorageMovementAttemptedItems {
             // storage policy in previous SPS iteration.
             String msg = "Blocks storage movement is SUCCESS for the track id: "
                 + trackId + " reported from co-ordinating datanode.";
-            if (itemInfo != null) {
-              if (!itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
-                blockStorageMovementNeeded.add(trackId);
+            if (attemptedItemInfo != null) {
+              if (!attemptedItemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+                blockStorageMovementNeeded
+                    .add(new ItemInfo(attemptedItemInfo.getRootId(), trackId));
                 LOG.warn("{} But adding trackID back to retry queue as some of"
                     + " the blocks couldn't find matching target nodes in"
                     + " previous SPS iteration.", msg);
               } else {
                 LOG.info(msg);
-                // Remove xattr for the track id.
-                this.sps.postBlkStorageMovementCleanup(
-                    storageMovementAttemptedResult.getTrackId());
+                blockStorageMovementNeeded
+                    .removeItemTrackInfo(itemInfo);
               }
             } else {
               LOG.info("{} But the trackID doesn't exists in "
                   + "storageMovementAttemptedItems list", msg);
-              // Remove xattr for the track id.
-              this.sps.postBlkStorageMovementCleanup(
-                  storageMovementAttemptedResult.getTrackId());
+              blockStorageMovementNeeded
+              .removeItemTrackInfo(itemInfo);
             }
             break;
           case IN_PROGRESS:
             isInprogress = true;
-            itemInfo = storageMovementAttemptedItems
+            attemptedItemInfo = storageMovementAttemptedItems
                 .get(storageMovementAttemptedResult.getTrackId());
-            if(itemInfo != null){
+            if(attemptedItemInfo != null){
               // update the attempted expiration time to next cycle.
-              itemInfo.touchLastReportedTimeStamp();
+              attemptedItemInfo.touchLastReportedTimeStamp();
             }
             break;
           default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
index 3241e6d..41a3a6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java
@@ -17,28 +17,86 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.Queue;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * A Class to track the block collection IDs for which physical storage movement
- * needed as per the Namespace and StorageReports from DN.
+ * A class to track the block collection IDs (inode IDs) for which physical
+ * storage movement is needed, as per the namespace and storage reports from
+ * the DNs. It scans the pending directories for which storage movement is
+ * required, schedules the block collection IDs for movement, tracks the info
+ * of scheduled items and removes the SPS xAttr from the file/directory once
+ * the movement succeeds.
  */
 @InterfaceAudience.Private
 public class BlockStorageMovementNeeded {
-  private final Queue<Long> storageMovementNeeded = new LinkedList<Long>();
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue<ItemInfo> storageMovementNeeded =
+      new LinkedList<ItemInfo>();
 
   /**
-   * Add the block collection id to tracking list for which storage movement
+   * Map of rootId to the number of children. The number of children indicates
+   * how many files are still pending to satisfy the policy.
+   */
+  private final Map<Long, Integer> pendingWorkForDirectory =
+      new HashMap<Long, Integer>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dirs to satisfy the policy
+  private final Queue<Long> spsDirsToBeTraveresed = new LinkedList<Long>();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon fileInodeIdCollector;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+      StoragePolicySatisfier sps) {
+    this.namesystem = namesystem;
+    this.sps = sps;
+  }
+
+  /**
+   * Add the candidate to the tracking list for which storage movement
+   * is expected, if necessary.
    *
-   * @param blockCollectionID
-   *          - block collection id, which is nothing but inode id.
+   * @param trackInfo
+   *          - track info for satisfying the policy
    */
-  public synchronized void add(Long blockCollectionID) {
-    storageMovementNeeded.add(blockCollectionID);
+  public synchronized void add(ItemInfo trackInfo) {
+    storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to the tracking list for which storage movement
+   * is expected, if necessary.
+   * @param rootId
+   *            - root inode id
+   * @param itemInfoList
+   *            - list of children in the directory
+   */
+  private synchronized void addAll(Long rootId,
+      List<ItemInfo> itemInfoList) {
+    storageMovementNeeded.addAll(itemInfoList);
+    pendingWorkForDirectory.put(rootId, itemInfoList.size());
   }
 
   /**
@@ -47,11 +105,168 @@ public class BlockStorageMovementNeeded {
    *
    * @return block collection ID
    */
-  public synchronized Long get() {
+  public synchronized ItemInfo get() {
     return storageMovementNeeded.poll();
   }
 
+  public synchronized void addToPendingDirQueue(long id) {
+    spsDirsToBeTraveresed.add(id);
+    // Notify waiting FileInodeIdCollector thread about the newly
+    // added SPS path.
+    synchronized (spsDirsToBeTraveresed) {
+      spsDirsToBeTraveresed.notify();
+    }
+  }
+
   public synchronized void clearAll() {
+    spsDirsToBeTraveresed.clear();
     storageMovementNeeded.clear();
+    pendingWorkForDirectory.clear();
+  }
+
+  /**
+   * Decrease the pending child count for the directory once one file's blocks
+   * have moved successfully. Remove the SPS xAttr once the pending child count
+   * reaches zero.
+   */
+  public synchronized void removeItemTrackInfo(ItemInfo trackInfo)
+      throws IOException {
+    if (trackInfo.isDir()) {
+      // If track is part of some root then reduce the pending directory work
+      // count.
+      long rootId = trackInfo.getRootId();
+      INode inode = namesystem.getFSDirectory().getInode(rootId);
+      if (inode == null) {
+        // directory deleted just remove it.
+        this.pendingWorkForDirectory.remove(rootId);
+      } else {
+        if (pendingWorkForDirectory.get(rootId) != null) {
+          Integer pendingWork = pendingWorkForDirectory.get(rootId) - 1;
+          pendingWorkForDirectory.put(rootId, pendingWork);
+          if (pendingWork <= 0) {
+            namesystem.removeXattr(rootId, XATTR_SATISFY_STORAGE_POLICY);
+            pendingWorkForDirectory.remove(rootId);
+          }
+        }
+      }
+    } else {
+      // Remove xAttr if trackID doesn't exist in
+      // storageMovementAttemptedItems or file policy satisfied.
+      namesystem.removeXattr(trackInfo.getTrackId(),
+          XATTR_SATISFY_STORAGE_POLICY);
+    }
+  }
+
+  public synchronized void clearQueue(long trackId) {
+    spsDirsToBeTraveresed.remove(trackId);
+    Iterator<ItemInfo> iterator = storageMovementNeeded.iterator();
+    while (iterator.hasNext()) {
+      ItemInfo next = iterator.next();
+      if (next.getRootId() == trackId) {
+        iterator.remove();
+      }
+    }
+    pendingWorkForDirectory.remove(trackId);
+  }
+
+  /**
+   * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
+   * and remove the SPS xAttrs so that the associated resources are cleaned
+   * up.
+   */
+  public synchronized void clearQueuesWithNotification() {
+    // Remove xAttr from directories
+    Long trackId;
+    while ((trackId = spsDirsToBeTraveresed.poll()) != null) {
+      try {
+        // Remove xAttr for the directory
+        namesystem.removeXattr(trackId, XATTR_SATISFY_STORAGE_POLICY);
+      } catch (IOException ie) {
+        LOG.warn("Failed to remove SPS xattr for track id " + trackId, ie);
+      }
+    }
+
+    // Files are added directly to storageMovementNeeded, so try to remove
+    // the xAttr for each file
+    ItemInfo itemInfo;
+    while ((itemInfo = storageMovementNeeded.poll()) != null) {
+      try {
+        // Remove xAttr for file
+        if (!itemInfo.isDir()) {
+          namesystem.removeXattr(itemInfo.getTrackId(),
+              XATTR_SATISFY_STORAGE_POLICY);
+        }
+      } catch (IOException ie) {
+        LOG.warn(
+            "Failed to remove SPS xattr for track id "
+                + itemInfo.getTrackId(), ie);
+      }
+    }
+    this.clearAll();
+  }
+
+  /**
+   * Take a dir track ID from the spsDirsToBeTraveresed queue and collect child
+   * IDs to process for satisfying the policy.
+   */
+  private class FileInodeIdCollector implements Runnable {
+    @Override
+    public void run() {
+      LOG.info("Starting FileInodeIdCollector!.");
+      while (namesystem.isRunning() && sps.isRunning()) {
+        try {
+          if (!namesystem.isInSafeMode()) {
+            FSDirectory fsd = namesystem.getFSDirectory();
+            Long rootINodeId = spsDirsToBeTraveresed.poll();
+            if (rootINodeId == null) {
+              // Waiting for SPS path
+              synchronized (spsDirsToBeTraveresed) {
+                spsDirsToBeTraveresed.wait(5000);
+              }
+            } else {
+              INode rootInode = fsd.getInode(rootINodeId);
+              if (rootInode != null) {
+                // TODO : HDFS-12291
+                // 1. Implement an efficient recursive directory iteration
+                // mechanism and satisfies storage policy for all the files
+                // under the given directory.
+                // 2. Process files in batches, so the datanodes' workload can
+                // be handled.
+                List<ItemInfo> itemInfoList =
+                    new ArrayList<>();
+                for (INode childInode : rootInode.asDirectory()
+                    .getChildrenList(Snapshot.CURRENT_STATE_ID)) {
+                  if (childInode.isFile()
+                      && childInode.asFile().numBlocks() != 0) {
+                    itemInfoList.add(
+                        new ItemInfo(rootINodeId, childInode.getId()));
+                  }
+                }
+                if (itemInfoList.isEmpty()) {
+                  // satisfy track info is empty, so remove the xAttr from the
+                  // directory
+                  namesystem.removeXattr(rootINodeId,
+                      XATTR_SATISFY_STORAGE_POLICY);
+                }
+                addAll(rootINodeId, itemInfoList);
+              }
+            }
+          }
+        } catch (Throwable t) {
+          LOG.warn("Exception while loading inodes to satisfy the policy", t);
+        }
+      }
+    }
+  }
+
+  public void start() {
+    fileInodeIdCollector = new Daemon(new FileInodeIdCollector());
+    fileInodeIdCollector.setName("FileInodeIdCollector");
+    fileInodeIdCollector.start();
+  }
+
+  public void stop() {
+    if (fileInodeIdCollector != null) {
+      fileInodeIdCollector.interrupt();
+    }
   }
 }
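The pendingWorkForDirectory bookkeeping above reduces to a per-directory countdown: record how many children were queued, decrement as each child's movement completes, and run the directory-level cleanup (removing the SPS xAttr in the real code) exactly once when the count reaches zero. Below is a stripped-down sketch of that accounting, with the cleanup reduced to a callback; all names are illustrative, not the patch's classes.

    import java.util.HashMap;
    import java.util.Map;

    /** Illustrative countdown of pending children per SPS directory. */
    class PendingDirectoryWork {
      private final Map<Long, Integer> pendingWorkForDirectory = new HashMap<>();

      /** Called when a directory's children are queued for block movement. */
      synchronized void addDirectory(long rootId, int childCount) {
        pendingWorkForDirectory.put(rootId, childCount);
      }

      /** Called once a child's movement finishes; fires the cleanup on the last child. */
      synchronized void childCompleted(long rootId, Runnable onDirectoryDone) {
        Integer pending = pendingWorkForDirectory.get(rootId);
        if (pending == null) {
          return; // directory deleted or already cleaned up
        }
        pending = pending - 1;
        if (pending <= 0) {
          pendingWorkForDirectory.remove(rootId);
          onDirectoryDone.run(); // e.g. remove the SPS xAttr
        } else {
          pendingWorkForDirectory.put(rootId, pending);
        }
      }
    }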

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index bd4e5ed..fb6eec9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 
@@ -31,6 +30,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.collect.Lists;
 
@@ -60,10 +60,24 @@ final class FSDirSatisfyStoragePolicyOp {
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
-      XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-      if (satisfyXAttr != null) {
+      INode inode = FSDirectory.resolveLastINode(iip);
+      if (inodeHasSatisfyXAttr(inode)) {
+        throw new IOException(
+            "Cannot request to call satisfy storage policy on path "
+                + inode.getFullPathName()
+                + ", as this file/dir was already called for satisfying "
+                + "storage policy.");
+      }
+      if (unprotectedSatisfyStoragePolicy(inode, fsd)) {
+        XAttr satisfyXAttr = XAttrHelper
+            .buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
         List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
         xAttrs.add(satisfyXAttr);
+        List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+        List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
+            xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
+        XAttrStorage.updateINodeXAttrs(inode, newXAttrs,
+            iip.getLatestSnapshotId());
         fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
       }
     } finally {
@@ -72,62 +86,29 @@ final class FSDirSatisfyStoragePolicyOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-      BlockManager bm, FSDirectory fsd) throws IOException {
-
-    final INode inode = FSDirectory.resolveLastINode(iip);
-    final int snapshotId = iip.getLatestSnapshotId();
-    final List<INode> candidateNodes = new ArrayList<>();
-
-    // TODO: think about optimization here, label the dir instead
-    // of the sub-files of the dir.
+  static boolean unprotectedSatisfyStoragePolicy(INode inode, FSDirectory fsd) {
     if (inode.isFile() && inode.asFile().numBlocks() != 0) {
-      candidateNodes.add(inode);
-    } else if (inode.isDirectory()) {
-      for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-        if (node.isFile() && node.asFile().numBlocks() != 0) {
-          candidateNodes.add(node);
-        }
-      }
-    }
-
-    if (candidateNodes.isEmpty()) {
-      return null;
+      // Add the file directly to the storageMovementNeeded queue, so it gets
+      // higher priority compared to a directory.
+      fsd.getBlockManager().getStoragePolicySatisfier()
+          .satisfyStoragePolicy(inode.getId());
+      return true;
+    } else if (inode.isDirectory()
+        && inode.asDirectory().getChildrenNum(Snapshot.CURRENT_STATE_ID) > 0) {
+      // Add the directory to the pending queue, so FileInodeIdCollector can
+      // process the directory's children in batches and recursively.
+      fsd.getBlockManager().getStoragePolicySatisfier()
+          .addInodeToPendingDirQueue(inode.getId());
+      return true;
     }
-    // If node has satisfy xattr, then stop adding it
-    // to satisfy movement queue.
-    if (inodeHasSatisfyXAttr(candidateNodes)) {
-      throw new IOException(
-          "Cannot request to call satisfy storage policy on path "
-              + iip.getPath()
-              + ", as this file/dir was already called for satisfying "
-              + "storage policy.");
-    }
-
-    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
-    final XAttr satisfyXAttr = XAttrHelper
-        .buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
-    xattrs.add(satisfyXAttr);
-
-    for (INode node : candidateNodes) {
-      bm.satisfyStoragePolicy(node.getId());
-      List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(node);
-      List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
-          xattrs, EnumSet.of(XAttrSetFlag.CREATE));
-      XAttrStorage.updateINodeXAttrs(node, newXAttrs, snapshotId);
-    }
-    return satisfyXAttr;
+    return false;
   }
 
-  private static boolean inodeHasSatisfyXAttr(List<INode> candidateNodes) {
-    // If the node is a directory and one of the child files
-    // has satisfy xattr, then return true for this directory.
-    for (INode inode : candidateNodes) {
-      final XAttrFeature f = inode.getXAttrFeature();
-      if (inode.isFile() && f != null
-          && f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null) {
-        return true;
-      }
+  private static boolean inodeHasSatisfyXAttr(INode inode) {
+    final XAttrFeature f = inode.getXAttrFeature();
+    if (inode.isFile() && f != null
+        && f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null) {
+      return true;
     }
     return false;
   }
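
For readers skimming the hunk above, the new routing rule in
unprotectedSatisfyStoragePolicy boils down to: a non-empty file is queued
directly for block movement, while a non-empty directory is parked on a
pending-directory queue and expanded in batches later. A standalone sketch of
that rule (not code from the patch; SpsQueues is a hypothetical stand-in for
the object returned by fsd.getBlockManager().getStoragePolicySatisfier()):

    import org.apache.hadoop.hdfs.server.namenode.INode;
    import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

    /** Sketch only; restates the routing rule, not code from the patch. */
    class SpsRoutingSketch {
      /** Hypothetical stand-in for the SPS queue entry points. */
      interface SpsQueues {
        void satisfyStoragePolicy(long inodeId);        // file queue (higher priority)
        void addInodeToPendingDirQueue(long inodeId);   // directory queue (batched, recursive)
      }

      static boolean route(INode inode, SpsQueues queues) {
        if (inode.isFile() && inode.asFile().numBlocks() != 0) {
          queues.satisfyStoragePolicy(inode.getId());   // non-empty file: queue directly
          return true;
        }
        if (inode.isDirectory()
            && inode.asDirectory().getChildrenNum(Snapshot.CURRENT_STATE_ID) > 0) {
          queues.addInodeToPendingDirQueue(inode.getId()); // non-empty dir: expand later
          return true;
        }
        return false;                                   // empty file or empty dir: nothing queued
      }
    }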

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index b73b1ab..e964a23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -196,6 +196,14 @@ class FSDirXAttrOp {
     List<XAttr> newXAttrs = filterINodeXAttrs(existingXAttrs, toRemove,
                                               removedXAttrs);
     if (existingXAttrs.size() != newXAttrs.size()) {
+      for (XAttr xattr : toRemove) {
+        if (XATTR_SATISFY_STORAGE_POLICY
+            .equals(XAttrHelper.getPrefixedName(xattr))) {
+          fsd.getBlockManager().getStoragePolicySatisfier()
+              .clearQueue(inode.getId());
+          break;
+        }
+      }
       XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
       return removedXAttrs;
     }
@@ -287,8 +295,7 @@ class FSDirXAttrOp {
 
       // Add inode id to movement queue if xattrs contain satisfy xattr.
       if (XATTR_SATISFY_STORAGE_POLICY.equals(xaName)) {
-        FSDirSatisfyStoragePolicyOp.unprotectedSatisfyStoragePolicy(iip,
-            fsd.getBlockManager(), fsd);
+        FSDirSatisfyStoragePolicyOp.unprotectedSatisfyStoragePolicy(inode, fsd);
         continue;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b8d90f9..648232e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1359,7 +1359,7 @@ public class FSDirectory implements Closeable {
     if (xattr == null) {
       return;
     }
-    getBlockManager().satisfyStoragePolicy(inode.getId());
+    FSDirSatisfyStoragePolicyOp.unprotectedSatisfyStoragePolicy(inode, this);
   }
 
   private void addEncryptionZone(INodeWithAdditionalFields inode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 57bfa2f..2d05090 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1306,7 +1306,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     writeLock();
     try {
       if (blockManager != null) {
-        blockManager.stopSPS(true);
+        blockManager.stopSPS(false);
       }
       stopSecretManager();
       leaseManager.stopMonitor();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 3165813..48d0598 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -106,10 +103,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
-      final BlockStorageMovementNeeded storageMovementNeeded,
       final BlockManager blkManager, Configuration conf) {
     this.namesystem = namesystem;
-    this.storageMovementNeeded = storageMovementNeeded;
+    this.storageMovementNeeded = new BlockStorageMovementNeeded(namesystem,
+        this);
     this.blockManager = blkManager;
     this.storageMovementsMonitor = new BlockStorageMovementAttemptedItems(
         conf.getLong(
@@ -146,7 +143,7 @@ public class StoragePolicySatisfier implements Runnable {
     // Ensure that all the previously submitted block movements(if any) have to
     // be stopped in all datanodes.
     addDropSPSWorkCommandsToAllDNs();
-
+    storageMovementNeeded.start();
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
@@ -162,14 +159,17 @@ public class StoragePolicySatisfier implements Runnable {
    */
   public synchronized void disable(boolean forceStop) {
     isRunning = false;
+
     if (storagePolicySatisfierThread == null) {
       return;
     }
 
+    storageMovementNeeded.stop();
+
     storagePolicySatisfierThread.interrupt();
     this.storageMovementsMonitor.stop();
     if (forceStop) {
-      this.clearQueuesWithNotification();
+      storageMovementNeeded.clearQueuesWithNotification();
       addDropSPSWorkCommandsToAllDNs();
     } else {
       LOG.info("Stopping StoragePolicySatisfier.");
@@ -184,6 +184,7 @@ public class StoragePolicySatisfier implements Runnable {
       disable(true);
     }
     this.storageMovementsMonitor.stopGracefully();
+
     if (storagePolicySatisfierThread == null) {
       return;
     }
@@ -220,10 +221,11 @@ public class StoragePolicySatisfier implements Runnable {
     while (namesystem.isRunning() && isRunning) {
       try {
         if (!namesystem.isInSafeMode()) {
-          Long blockCollectionID = storageMovementNeeded.get();
-          if (blockCollectionID != null) {
+          ItemInfo itemInfo = storageMovementNeeded.get();
+          if (itemInfo != null) {
+            long trackId = itemInfo.getTrackId();
             BlockCollection blockCollection =
-                namesystem.getBlockCollection(blockCollectionID);
+                namesystem.getBlockCollection(trackId);
             // Check blockCollectionId existence.
             if (blockCollection != null) {
               BlocksMovingAnalysisStatus status =
@@ -234,21 +236,21 @@ public class StoragePolicySatisfier implements Runnable {
                 // Just add to monitor, so it will be tracked for result and
                 // be removed on successful storage movement result.
               case ALL_BLOCKS_TARGETS_PAIRED:
-                this.storageMovementsMonitor.add(blockCollectionID, true);
+                this.storageMovementsMonitor.add(itemInfo, true);
                 break;
              // Add to monitor with allBlocksAttemptedToSatisfy flag false, so
              // that it will be tracked and still considered for retry, as the
              // analysis did not find targets for the storage movement blocks.
               case FEW_BLOCKS_TARGETS_PAIRED:
-                this.storageMovementsMonitor.add(blockCollectionID, false);
+                this.storageMovementsMonitor.add(itemInfo, false);
                 break;
               case FEW_LOW_REDUNDANCY_BLOCKS:
                 if (LOG.isDebugEnabled()) {
-                  LOG.debug("Adding trackID " + blockCollectionID
+                  LOG.debug("Adding trackID " + trackId
                       + " back to retry queue as some of the blocks"
                       + " are low redundant.");
                 }
-                this.storageMovementNeeded.add(blockCollectionID);
+                this.storageMovementNeeded.add(itemInfo);
                 break;
               // Just clean Xattrs
               case BLOCKS_TARGET_PAIRING_SKIPPED:
@@ -256,9 +258,13 @@ public class StoragePolicySatisfier implements Runnable {
               default:
                 LOG.info("Block analysis skipped or blocks already satisfied"
                     + " with storages. So, Cleaning up the Xattrs.");
-                postBlkStorageMovementCleanup(blockCollectionID);
+                storageMovementNeeded.removeItemTrackInfo(itemInfo);
                 break;
               }
+            } else {
+              // The file no longer exists (it may have been deleted), so
+              // remove the trackId from the queue.
+              storageMovementNeeded.removeItemTrackInfo(itemInfo);
             }
           }
         }
@@ -828,31 +834,63 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   /**
-   * Clean all the movements in storageMovementNeeded and notify
-   * to clean up required resources.
-   * @throws IOException
+   * Add a file inode to the queue so that storage movement is scheduled for
+   * its blocks.
+   *
+   * @param inodeId
+   *          - file inode/blockcollection id.
    */
-  private void clearQueuesWithNotification() {
-    Long id;
-    while ((id = storageMovementNeeded.get()) != null) {
-      try {
-        postBlkStorageMovementCleanup(id);
-      } catch (IOException ie) {
-        LOG.warn("Failed to remove SPS "
-            + "xattr for collection id " + id, ie);
-      }
+  public void satisfyStoragePolicy(Long inodeId) {
+    // For a file, rootId and trackId are the same.
+    storageMovementNeeded.add(new ItemInfo(inodeId, inodeId));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Added track info for inode {} to block "
+          + "storageMovementNeeded queue", inodeId);
     }
   }
 
+  public void addInodeToPendingDirQueue(long id) {
+    storageMovementNeeded.addToPendingDirQueue(id);
+  }
+
+  /**
+   * Clear queues for given track id.
+   */
+  public void clearQueue(long trackId) {
+    storageMovementNeeded.clearQueue(trackId);
+  }
+
   /**
-   * When block movement has been finished successfully, some additional
-   * operations should be notified, for example, SPS xattr should be
-   * removed.
-   * @param trackId track id i.e., block collection id.
-   * @throws IOException
+   * ItemInfo describes a file for which the storage policy needs to be
+   * satisfied.
    */
-  public void postBlkStorageMovementCleanup(long trackId)
-      throws IOException {
-    this.namesystem.removeXattr(trackId, XATTR_SATISFY_STORAGE_POLICY);
+  public static class ItemInfo {
+    private long rootId;
+    private long trackId;
+
+    public ItemInfo(long rootId, long trackId) {
+      this.rootId = rootId;
+      this.trackId = trackId;
+    }
+
+    /**
+     * Return the root inode id for the current track id.
+     */
+    public long getRootId() {
+      return rootId;
+    }
+
+    /**
+     * Return the file inode id for which the policy needs to be satisfied.
+     */
+    public long getTrackId() {
+      return trackId;
+    }
+
+    /**
+     * Returns true if the tracking path is a directory, false otherwise.
+     */
+    public boolean isDir() {
+      return (rootId != trackId);
+    }
   }
 }
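
The rootId/trackId pairing above works as follows: when satisfyStoragePolicy is
called on a file, both ids are the file's inode id; when a child file is later
collected under a tracked directory, rootId is the directory's inode id and
trackId is the child's, which is why isDir() can be derived from the pair. A
small illustrative sketch (the inode ids are made up):

    import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo;

    class ItemInfoSketch {
      static void demo() {
        // A file on which satisfyStoragePolicy() was called directly.
        ItemInfo fileItem = new ItemInfo(101L, 101L);
        assert !fileItem.isDir();        // rootId == trackId

        // A child file (inode 205) collected under a tracked directory (inode 200).
        ItemInfo childItem = new ItemInfo(200L, 205L);
        assert childItem.isDir();        // rootId != trackId
        assert childItem.getRootId() == 200L && childItem.getTrackId() == 205L;
      }
    }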

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index 402d4d1..b84b1d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
@@ -115,9 +114,7 @@ public class TestStoragePolicySatisfyWorker {
     // move to ARCHIVE
     dfs.setStoragePolicy(new Path(file), "COLD");
 
-    FSNamesystem namesystem = cluster.getNamesystem();
-    INode inode = namesystem.getFSDirectory().getINode(file);
-    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+    dfs.satisfyStoragePolicy(new Path(file));
 
     cluster.triggerHeartbeats();
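
The test now drives SPS through the public client API instead of reaching into
the NameNode internals. A minimal sketch of that flow (the path and policy name
are illustrative; cluster is the MiniDFSCluster from the test setup):

    // Ask the StoragePolicySatisfier to move already-written blocks so that
    // they match the path's storage policy.
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path file = new Path("/demo/file");
    dfs.setStoragePolicy(file, "COLD");   // choose the target policy
    dfs.satisfyStoragePolicy(file);       // schedule block movement via SPS
    cluster.triggerHeartbeats();          // speed up DN <-> NN exchange in tests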
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
index 04a63ac..55ebf9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockStorageMovementAttemptedItems.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.apache.hadoop.util.Time.monotonicNow;
 import static org.junit.Assert.*;
 
+import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.junit.After;
 import org.junit.Before;
@@ -38,7 +39,9 @@ public class TestBlockStorageMovementAttemptedItems {
 
   @Before
   public void setup() throws Exception {
-    unsatisfiedStorageMovementFiles = new BlockStorageMovementNeeded();
+    unsatisfiedStorageMovementFiles = new BlockStorageMovementNeeded(
+        Mockito.mock(Namesystem.class),
+        Mockito.mock(StoragePolicySatisfier.class));
     StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
     bsmAttemptedItems = new BlockStorageMovementAttemptedItems(100,
         selfRetryTimeout, unsatisfiedStorageMovementFiles, sps);
@@ -57,9 +60,9 @@ public class TestBlockStorageMovementAttemptedItems {
     long stopTime = monotonicNow() + (retryTimeout * 2);
     boolean isItemFound = false;
     while (monotonicNow() < (stopTime)) {
-      Long ele = null;
+      ItemInfo ele = null;
       while ((ele = unsatisfiedStorageMovementFiles.get()) != null) {
-        if (item.longValue() == ele.longValue()) {
+        if (item == ele.getTrackId()) {
           isItemFound = true;
           break;
         }
@@ -77,7 +80,7 @@ public class TestBlockStorageMovementAttemptedItems {
   public void testAddResultWithFailureResult() throws Exception {
     bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, true);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), true);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
@@ -88,7 +91,7 @@ public class TestBlockStorageMovementAttemptedItems {
   public void testAddResultWithSucessResult() throws Exception {
     bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, true);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), true);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
@@ -99,7 +102,7 @@ public class TestBlockStorageMovementAttemptedItems {
   public void testNoResultAdded() throws Exception {
     bsmAttemptedItems.start(); // start block movement result monitor thread
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, true);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), true);
     // After self retry timeout, it should be added back for retry
     assertTrue("Failed to add to the retry list",
         checkItemMovedForRetry(item, 600));
@@ -115,7 +118,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @Test(timeout = 30000)
   public void testPartialBlockMovementShouldBeRetried1() throws Exception {
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), false);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
@@ -136,7 +139,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @Test(timeout = 30000)
   public void testPartialBlockMovementShouldBeRetried2() throws Exception {
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), false);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.SUCCESS)});
@@ -153,17 +156,20 @@ public class TestBlockStorageMovementAttemptedItems {
   }
 
   /**
-   * Partial block movement with only BlocksStorageMovementResult#FAILURE result
-   * and storageMovementAttemptedItems list is empty.
+   * Partial block movement with only BlocksStorageMovementResult#FAILURE
+   * result and storageMovementAttemptedItems list is empty.
    */
   @Test(timeout = 30000)
-  public void testPartialBlockMovementShouldBeRetried3() throws Exception {
+  public void testPartialBlockMovementWithEmptyAttemptedQueue()
+      throws Exception {
     Long item = new Long(1234);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
-            item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});
+            item, BlocksStorageMovementResult.Status.FAILURE)});
     bsmAttemptedItems.blockStorageMovementResultCheck();
-    assertTrue("Failed to add to the retry list",
+    assertFalse(
+        "Should not add in queue again if it is not there in"
+            + " storageMovementAttemptedItems",
         checkItemMovedForRetry(item, 5000));
     assertEquals("Failed to remove from the attempted list", 0,
         bsmAttemptedItems.getAttemptedItemsCount());
@@ -176,7 +182,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @Test(timeout = 30000)
   public void testPartialBlockMovementShouldBeRetried4() throws Exception {
     Long item = new Long(1234);
-    bsmAttemptedItems.add(item, false);
+    bsmAttemptedItems.add(new ItemInfo(0L, item), false);
     bsmAttemptedItems.addResults(
         new BlocksStorageMovementResult[]{new BlocksStorageMovementResult(
             item.longValue(), BlocksStorageMovementResult.Status.FAILURE)});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 8516ea0..e7b9148 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -20,16 +20,22 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
+
 import java.io.IOException;
+import java.util.List;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.junit.Assert.*;
@@ -482,6 +488,104 @@ public class TestPersistentStoragePolicySatisfier {
   }
 
   /**
+   * Test the SPS xAttr on a directory. The xAttr should be removed from the
+   * directory once all of its files' blocks have moved to the target storage.
+   */
+  @Test(timeout = 300000)
+  public void testSPSxAttrWhenSpsCalledForDir() throws Exception {
+    try {
+      clusterSetUp();
+      Path parent = new Path("/parent");
+      // create parent dir
+      fs.mkdirs(parent);
+
+      // create 5 child files
+      for (int i = 0; i < 5; i++) {
+        DFSTestUtil.createFile(fs, new Path(parent, "f" + i), 1024, (short) 3,
+            0);
+      }
+
+      // Set storage policy for parent directory
+      fs.setStoragePolicy(parent, "COLD");
+
+      // Stop one DN so we can check the SPS xAttr for directory.
+      DataNodeProperties stopDataNode = cluster.stopDataNode(0);
+
+      fs.satisfyStoragePolicy(parent);
+
+      // Check xAttr for parent directory
+      FSNamesystem namesystem = cluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode("/parent");
+      XAttrFeature f = inode.getXAttrFeature();
+      assertTrue("SPS xAttr should be exist",
+          f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null);
+
+      // check the children; the SPS xAttr should not be present on them
+      for (int i = 0; i < 5; i++) {
+        inode = namesystem.getFSDirectory().getINode("/parent/f" + i);
+        f = inode.getXAttrFeature();
+        assertTrue(f == null);
+      }
+
+      cluster.restartDataNode(stopDataNode, false);
+
+      // wait and check that all the file blocks have moved to ARCHIVE
+      for (int i = 0; i < 5; i++) {
+        DFSTestUtil.waitExpectedStorageType("/parent/f" + i,
+            StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
+      }
+      DFSTestUtil.waitForXattrRemoved("/parent", XATTR_SATISFY_STORAGE_POLICY,
+          namesystem, 10000);
+    } finally {
+      clusterShutdown();
+    }
+
+  }
+
+  /**
+   * Test the SPS xAttr on a file. The xAttr should be removed from the file
+   * once all of its blocks have moved to the target storage.
+   */
+  @Test(timeout = 300000)
+  public void testSPSxAttrWhenSpsCalledForFile() throws Exception {
+    try {
+      clusterSetUp();
+      Path file = new Path("/file");
+      DFSTestUtil.createFile(fs, file, 1024, (short) 3, 0);
+
+      // Set storage policy for file
+      fs.setStoragePolicy(file, "COLD");
+
+      // Stop one DN so we can check the SPS xAttr for file.
+      DataNodeProperties stopDataNode = cluster.stopDataNode(0);
+
+      fs.satisfyStoragePolicy(file);
+
+      // Check xAttr for the file
+      FSNamesystem namesystem = cluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode("/file");
+      XAttrFeature f = inode.getXAttrFeature();
+      assertTrue("SPS xAttr should be exist",
+          f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null);
+
+      cluster.restartDataNode(stopDataNode, false);
+
+      // wait and check that all the file blocks have moved to ARCHIVE
+      DFSTestUtil.waitExpectedStorageType("/file", StorageType.ARCHIVE, 3,
+          30000, cluster.getFileSystem());
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+          return !existingXAttrs.contains(XATTR_SATISFY_STORAGE_POLICY);
+        }
+      }, 100, 10000);
+    } finally {
+      clusterShutdown();
+    }
+  }
+
+  /**
    * Restart the whole env and trigger the DataNode's heartbeats.
    * @throws Exception
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a2c50b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 2536834..3375590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -119,8 +119,6 @@ public class TestStoragePolicySatisfier {
   private void doTestWhenStoragePolicySetToCOLD() throws Exception {
     // Change policy to COLD
     dfs.setStoragePolicy(new Path(file), COLD);
-    FSNamesystem namesystem = hdfsCluster.getNamesystem();
-    INode inode = namesystem.getFSDirectory().getINode(file);
 
     StorageType[][] newtypes =
         new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
@@ -129,7 +127,7 @@ public class TestStoragePolicySatisfier {
     startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
         storagesPerDatanode, capacity, hdfsCluster);
 
-    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+    dfs.satisfyStoragePolicy(new Path(file));
 
     hdfsCluster.triggerHeartbeats();
     // Wait till namenode notified about the block location details
@@ -144,8 +142,6 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Change policy to ALL_SSD
       dfs.setStoragePolicy(new Path(file), "ALL_SSD");
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.SSD, StorageType.DISK},
@@ -156,7 +152,7 @@ public class TestStoragePolicySatisfier {
       // datanodes.
       startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
@@ -174,8 +170,6 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), ONE_SSD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
@@ -184,7 +178,7 @@ public class TestStoragePolicySatisfier {
       // datanodes.
       startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
@@ -207,8 +201,6 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), ONE_SSD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
@@ -217,7 +209,7 @@ public class TestStoragePolicySatisfier {
       // datanodes.
       startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
 
       // Wait till the block is moved to SSD areas
@@ -250,13 +242,10 @@ public class TestStoragePolicySatisfier {
         files.add(file1);
         writeContent(file1);
       }
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      List<Long> blockCollectionIds = new ArrayList<>();
       // Change policy to ONE_SSD
       for (String fileName : files) {
         dfs.setStoragePolicy(new Path(fileName), ONE_SSD);
-        INode inode = namesystem.getFSDirectory().getINode(fileName);
-        blockCollectionIds.add(inode.getId());
+        dfs.satisfyStoragePolicy(new Path(fileName));
       }
 
       StorageType[][] newtypes =
@@ -266,9 +255,6 @@ public class TestStoragePolicySatisfier {
       // datanodes.
       startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
-      for (long inodeId : blockCollectionIds) {
-        namesystem.getBlockManager().satisfyStoragePolicy(inodeId);
-      }
       hdfsCluster.triggerHeartbeats();
 
       for (String fileName : files) {
@@ -279,7 +265,7 @@ public class TestStoragePolicySatisfier {
             fileName, StorageType.DISK, 2, 30000, dfs);
       }
 
-      waitForBlocksMovementResult(blockCollectionIds.size(), 30000);
+      waitForBlocksMovementResult(files.size(), 30000);
     } finally {
       shutdownCluster();
     }
@@ -441,8 +427,6 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), COLD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE}};
@@ -451,7 +435,7 @@ public class TestStoragePolicySatisfier {
       startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
 
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier identified that block to move to
       // ARCHIVE area.
@@ -486,8 +470,6 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), COLD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
       StorageType[][] newtypes =
           new StorageType[][]{{StorageType.DISK, StorageType.DISK}};
@@ -495,7 +477,7 @@ public class TestStoragePolicySatisfier {
       startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
           storagesPerDatanode, capacity, hdfsCluster);
 
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
 
       // No block movement will be scheduled as there is no target node
@@ -600,47 +582,51 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 120000)
   public void testMoveWithBlockPinning() throws Exception {
-    config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
-    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-        true);
-    hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
-        .storageTypes(
-            new StorageType[][] {{StorageType.DISK, StorageType.DISK},
-                {StorageType.DISK, StorageType.DISK},
-                {StorageType.DISK, StorageType.DISK}})
-        .build();
-
-    hdfsCluster.waitActive();
-    dfs = hdfsCluster.getFileSystem();
+    try {
+      config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
+      hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
+          .storageTypes(
+              new StorageType[][] {{StorageType.DISK, StorageType.DISK},
+                  {StorageType.DISK, StorageType.DISK},
+                  {StorageType.DISK, StorageType.DISK}})
+          .build();
 
-    // create a file with replication factor 3 and mark 2 pinned block
-    // locations.
-    final String file1 = createFileAndSimulateFavoredNodes(2);
+      hdfsCluster.waitActive();
+      dfs = hdfsCluster.getFileSystem();
 
-    // Change policy to COLD
-    dfs.setStoragePolicy(new Path(file1), COLD);
-    FSNamesystem namesystem = hdfsCluster.getNamesystem();
-    INode inode = namesystem.getFSDirectory().getINode(file1);
+      // create a file with replication factor 3 and mark 2 pinned block
+      // locations.
+      final String file1 = createFileAndSimulateFavoredNodes(2);
 
-    StorageType[][] newtypes =
-        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-            {StorageType.ARCHIVE, StorageType.ARCHIVE},
-            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
-    // Adding DISK based datanodes
-    startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
-        storagesPerDatanode, capacity, hdfsCluster);
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file1), COLD);
 
-    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
-    hdfsCluster.triggerHeartbeats();
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+      // Adding DISK based datanodes
+      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
 
-    // No block movement will be scheduled as there is no target node available
-    // with the required storage type.
-    waitForAttemptedItems(1, 30000);
-    waitForBlocksMovementResult(1, 30000);
-    DFSTestUtil.waitExpectedStorageType(
-        file1, StorageType.ARCHIVE, 1, 30000, dfs);
-    DFSTestUtil.waitExpectedStorageType(
-        file1, StorageType.DISK, 2, 30000, dfs);
+      dfs.satisfyStoragePolicy(new Path(file1));
+      hdfsCluster.triggerHeartbeats();
+
+      // No block movement will be scheduled as there is no target node
+      // available with the required storage type.
+      waitForAttemptedItems(1, 30000);
+      waitForBlocksMovementResult(1, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file1, StorageType.ARCHIVE, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file1, StorageType.DISK, 2, 30000, dfs);
+    } finally {
+      if (hdfsCluster != null) {
+        hdfsCluster.shutdown();
+      }
+    }
   }
 
   /**
@@ -682,10 +668,8 @@ public class TestStoragePolicySatisfier {
 
       // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), COLD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier identified that block to move to
       // ARCHIVE area.
@@ -723,10 +707,8 @@ public class TestStoragePolicySatisfier {
 
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), ONE_SSD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
 
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
       DFSTestUtil.waitExpectedStorageType(
           file, StorageType.SSD, 1, 30000, dfs);
@@ -764,10 +746,7 @@ public class TestStoragePolicySatisfier {
 
       // Change policy to WARM
       dfs.setStoragePolicy(new Path(file), "WARM");
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
-
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
       hdfsCluster.triggerHeartbeats();
 
       DFSTestUtil.waitExpectedStorageType(
@@ -848,8 +827,6 @@ public class TestStoragePolicySatisfier {
 
       // Change policy to ONE_SSD
       dfs.setStoragePolicy(new Path(file), ONE_SSD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
       Path filePath = new Path("/testChooseInSameDatanode");
       final FSDataOutputStream out =
           dfs.create(filePath, false, 100, (short) 1, 2 * DEFAULT_BLOCK_SIZE);
@@ -872,7 +849,7 @@ public class TestStoragePolicySatisfier {
       for (DataNode dataNode : dataNodes) {
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, true);
       }
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      dfs.satisfyStoragePolicy(new Path(file));
 
       // Wait for items to be processed
       waitForAttemptedItems(1, 30000);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

Posted by ra...@apache.org.
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f736094c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f736094c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f736094c

Branch: refs/heads/HDFS-10285
Commit: f736094cf90d58aac8fce590a351ef4aa6c14e27
Parents: cce0fdc
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:55 2017 +0530

----------------------------------------------------------------------
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++++++---
 .../namenode/TestStoragePolicySatisfier.java    | 32 ++++++++++++++++++++
 2 files changed, 42 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f736094c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
     assert fsd.getFSNamesystem().hasWriteLock();
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
       XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-      xAttrs.add(satisfyXAttr);
-      fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      if (satisfyXAttr != null) {
+        List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+        xAttrs.add(satisfyXAttr);
+        fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      }
     } finally {
       fsd.writeUnlock();
     }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
     // TODO: think about optimization here, label the dir instead
     // of the sub-files of the dir.
-    if (inode.isFile()) {
+    if (inode.isFile() && inode.asFile().numBlocks() != 0) {
       candidateNodes.add(inode);
     } else if (inode.isDirectory()) {
       for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-        if (node.isFile()) {
+        if (node.isFile() && node.asFile().numBlocks() != 0) {
           candidateNodes.add(node);
         }
       }
     }
 
+    if (candidateNodes.isEmpty()) {
+      return null;
+    }
     // If node has satisfy xattr, then stop adding it
     // to satisfy movement queue.
     if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f736094c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Test SPS with an empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for the empty file.
+   * 3. SPS should skip the file, and no xattr should be added for it.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+      FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+      long lastWrittenTxId = editlog.getLastWrittenTxId();
+      fs.satisfyStoragePolicy(filePath);
+      Assert.assertEquals("Xattr should not be added for the file",
+          lastWrittenTxId, editlog.getLastWrittenTxId());
+      INode inode = cluster.getNameNode().getNamesystem().getFSDirectory()
+          .getINode(filePath.toString());
+      Assert.assertTrue("XAttrFeature should be null for file",
+          inode.getXAttrFeature() == null);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: HDFS-12248. SNN will not upload fsimage on IOE and Interrupted exceptions. (Brahma Reddy Battula)

Posted by ra...@apache.org.
HDFS-12248. SNN will not upload fsimage on IOE and Interrupted exceptions. (Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb6a3c83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb6a3c83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb6a3c83

Branch: refs/heads/HDFS-10285
Commit: bb6a3c83305f97090f980c53adaaf37baf18c698
Parents: 3bee704
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Sat Aug 26 13:46:20 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Sat Aug 26 13:47:12 2017 +0800

----------------------------------------------------------------------
 .../namenode/CheckpointFaultInjector.java       | 18 ++++++--
 .../server/namenode/ha/StandbyCheckpointer.java | 16 ++++---
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  | 48 ++++++++++++++++++++
 3 files changed, 71 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6a3c83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
index 1852094..eeb082b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
@@ -24,13 +24,17 @@ import java.io.IOException;
  * Utility class to facilitate some fault injection tests for the checkpointing
  * process.
  */
-class CheckpointFaultInjector {
-  static CheckpointFaultInjector instance = new CheckpointFaultInjector();
-  
-  static CheckpointFaultInjector getInstance() {
+public class CheckpointFaultInjector {
+  public static CheckpointFaultInjector instance =
+      new CheckpointFaultInjector();
+
+  public static CheckpointFaultInjector getInstance() {
     return instance;
   }
-  
+
+  public static void set(CheckpointFaultInjector instance) {
+    CheckpointFaultInjector.instance = instance;
+  }
   public void beforeGetImageSetsHeaders() throws IOException {}
   public void afterSecondaryCallsRollEditLog() throws IOException {}
   public void duringMerge() throws IOException {}
@@ -46,4 +50,8 @@ class CheckpointFaultInjector {
   
   public void afterMD5Rename() throws IOException {}
   public void beforeEditsRename() throws IOException {}
+
+  public void duringUploadInProgess() throws InterruptedException, IOException {
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6a3c83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 753447b..789ed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointConf;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -228,7 +229,9 @@ public class StandbyCheckpointer {
       Future<TransferFsImage.TransferResult> upload =
           executor.submit(new Callable<TransferFsImage.TransferResult>() {
             @Override
-            public TransferFsImage.TransferResult call() throws IOException {
+            public TransferFsImage.TransferResult call()
+                throws IOException, InterruptedException {
+              CheckpointFaultInjector.getInstance().duringUploadInProgess();
               return TransferFsImage.uploadImageFromStorage(activeNNAddress, conf, namesystem
                   .getFSImage().getStorage(), imageType, txid, canceler);
             }
@@ -258,11 +261,12 @@ public class StandbyCheckpointer {
         break;
       }
     }
-    lastUploadTime = monotonicNow();
-
-    // we are primary if we successfully updated the ANN
-    this.isPrimaryCheckPointer = success;
-
+    if (ie == null && ioe == null) {
+      // Update only when the remote responded about success or failure.
+      lastUploadTime = monotonicNow();
+      // we are primary if we successfully updated the ANN
+      this.isPrimaryCheckPointer = success;
+    }
     // cleaner than copying code for multiple catch statements and better than catching all
     // exceptions, so we just handle the ones we expect.
     if (ie != null || ioe != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6a3c83/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index b356fb9..0545b04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 
 import javax.management.AttributeNotFoundException;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
@@ -568,6 +570,52 @@ public class TestRollingUpgrade {
     testCheckpoint(3);
   }
 
+  @Test(timeout = 60000)
+  public void testRollBackImage() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2);
+    MiniQJMHACluster cluster = null;
+    CheckpointFaultInjector old = CheckpointFaultInjector.getInstance();
+    try {
+      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2).build();
+      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
+      dfsCluster.waitActive();
+      dfsCluster.transitionToActive(0);
+      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
+      for (int i = 0; i <= 10; i++) {
+        Path foo = new Path("/foo" + i);
+        dfs.mkdirs(foo);
+      }
+      cluster.getDfsCluster().getNameNodeRpc(0).rollEdits();
+      CountDownLatch ruEdit = new CountDownLatch(1);
+      CheckpointFaultInjector.set(new CheckpointFaultInjector() {
+        @Override
+        public void duringUploadInProgess()
+            throws IOException, InterruptedException {
+          if (ruEdit.getCount() == 1) {
+            ruEdit.countDown();
+            Thread.sleep(180000);
+          }
+        }
+      });
+      ruEdit.await();
+      RollingUpgradeInfo info = dfs
+          .rollingUpgrade(RollingUpgradeAction.PREPARE);
+      Assert.assertTrue(info.isStarted());
+      FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();
+      queryForPreparation(dfs);
+      // The NN should have a copy of the fsimage in case of rollbacks.
+      Assert.assertTrue(fsimage.hasRollbackFSImage());
+    } finally {
+      CheckpointFaultInjector.set(old);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] [abbrv] hadoop git commit: HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver thread group. (Lei Xu)

Posted by ra...@apache.org.
HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver thread group. (Lei Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36bada30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36bada30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36bada30

Branch: refs/heads/HDFS-10285
Commit: 36bada3032e438099ada9d865c3945d42c3e7c2a
Parents: f29a0fc
Author: Lei Xu <le...@apache.org>
Authored: Fri Aug 25 16:01:14 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Fri Aug 25 16:01:14 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36bada30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6069487..1a85b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2999,8 +2999,8 @@ public class DataNode extends ReconfigurableBase
     b.setNumBytes(visible);
 
     if (targets.length > 0) {
-      new DataTransfer(targets, targetStorageTypes, targetStorageIds, b, stage,
-          client).run();
+      new Daemon(new DataTransfer(targets, targetStorageTypes,
+          targetStorageIds, b, stage, client)).start();
     }
   }
 


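Editor's note: the one-line change above moves the transfer off the calling thread and onto a daemon thread. The following is a minimal, plain-JDK sketch of that distinction; TransferTask and the thread-group name are illustrative stand-ins, not Hadoop's DataTransfer or Daemon classes, and the real fix uses org.apache.hadoop.util.Daemon as shown in the diff.

  // Hypothetical stand-in for the DataNode's per-block transfer work.
  class TransferTask implements Runnable {
    private final String blockId;
    TransferTask(String blockId) { this.blockId = blockId; }
    @Override
    public void run() {
      // Copy the block to its targets; this may take a long time.
      System.out.println("transferring " + blockId);
    }
  }

  class TransferExample {
    public static void main(String[] args) {
      ThreadGroup xceivers = new ThreadGroup("dataXceiverServer");

      // Before the fix (conceptually): the work executes inline on the
      // caller's thread and blocks it until the transfer finishes.
      new TransferTask("blk_1").run();

      // After the fix (conceptually): the work runs asynchronously on a
      // daemon thread placed in the xceiver thread group, so it is
      // accounted for and torn down with the other xceiver threads.
      Thread t = new Thread(xceivers, new TransferTask("blk_2"));
      t.setDaemon(true);
      t.start();
    }
  }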

[31/50] [abbrv] hadoop git commit: HDFS-11243. [SPS]: Add a protocol command from NN to DN for dropping the SPS work and queues. Contributed by Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11243. [SPS]: Add a protocol command from NN to DN for dropping the SPS work and queues. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69d8cccf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69d8cccf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69d8cccf

Branch: refs/heads/HDFS-10285
Commit: 69d8cccf0ed9f2742e9cc96c0ca40cf8b8741c5c
Parents: a27ab4a
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Tue Jan 31 23:44:01 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:16 2017 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 12 ++++
 .../server/blockmanagement/BlockManager.java    | 13 ++---
 .../blockmanagement/DatanodeDescriptor.java     | 18 ++++++
 .../server/blockmanagement/DatanodeManager.java | 19 +++++++
 .../hdfs/server/datanode/BPOfferService.java    |  4 ++
 .../datanode/BlockStorageMovementTracker.java   | 12 ++++
 .../datanode/StoragePolicySatisfyWorker.java    | 22 +++++++-
 .../server/namenode/StoragePolicySatisfier.java | 25 +++++++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |  2 +
 .../server/protocol/DropSPSWorkCommand.java     | 36 ++++++++++++
 .../src/main/proto/DatanodeProtocol.proto       |  9 +++
 .../TestStoragePolicySatisfyWorker.java         | 59 ++++++++++++++++++++
 12 files changed, 216 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 156c9c2..3b38077 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdComma
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DropSPSWorkCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
@@ -108,6 +109,7 @@ import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DropSPSWorkCommand;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -139,6 +141,10 @@ public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO = 
       RegisterCommandProto.newBuilder().build();
   private static final RegisterCommand REG_CMD = new RegisterCommand();
+  private static final DropSPSWorkCommandProto DROP_SPS_WORK_CMD_PROTO =
+      DropSPSWorkCommandProto.newBuilder().build();
+  private static final DropSPSWorkCommand DROP_SPS_WORK_CMD =
+      new DropSPSWorkCommand();
 
   private PBHelper() {
     /** Hidden constructor */
@@ -474,6 +480,8 @@ public class PBHelper {
       return PBHelper.convert(proto.getBlkECReconstructionCmd());
     case BlockStorageMovementCommand:
       return PBHelper.convert(proto.getBlkStorageMovementCmd());
+    case DropSPSWorkCommand:
+      return DROP_SPS_WORK_CMD;
     default:
       return null;
     }
@@ -613,6 +621,10 @@ public class PBHelper {
           .setBlkStorageMovementCmd(
               convert((BlockStorageMovementCommand) datanodeCommand));
       break;
+    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
+      builder.setCmdType(DatanodeCommandProto.Type.DropSPSWorkCommand)
+          .setDropSPSWorkCmd(DROP_SPS_WORK_CMD_PROTO);
+      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);

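Editor's note: the PBHelper hunks above register a shared, pre-built instance for the new payload-less command and map it in both directions of the protobuf conversion. A stripped-down sketch of that shape in plain Java, with placeholder names instead of the generated protobuf classes:

  // Both representations are empty, so one shared instance can be reused
  // in each direction instead of allocating per conversion.
  enum CmdType { REGISTER, DROP_WORK }

  final class DropWorkCommand {
    static final DropWorkCommand INSTANCE = new DropWorkCommand();
    private DropWorkCommand() { }
  }

  final class WireMapper {
    // command object -> wire type (the proto side is just an empty message)
    static CmdType toWire(Object cmd) {
      if (cmd instanceof DropWorkCommand) {
        return CmdType.DROP_WORK;
      }
      throw new IllegalArgumentException("unknown command: " + cmd);
    }

    // wire type -> command object, handing out the shared singleton
    static Object fromWire(CmdType type) {
      switch (type) {
      case DROP_WORK:
        return DropWorkCommand.INSTANCE;
      default:
        return null;   // mirrors PBHelper's "default: return null" branch
      }
    }
  }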
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f053265..1a809a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -701,13 +701,13 @@ public class BlockManager implements BlockStatsMXBean {
     mxBeanName = MBeans.register("NameNode", "BlockStats", this);
     bmSafeMode.activate(blockTotal);
     if (sps != null && !haEnabled) {
-      sps.start();
+      sps.start(false);
     }
   }
 
   public void close() {
     if (sps != null) {
-      sps.stop();
+      sps.stop(false);
     }
     bmSafeMode.close();
     try {
@@ -4905,7 +4905,7 @@ public class BlockManager implements BlockStatsMXBean {
       return;
     }
 
-    sps.start();
+    sps.start(true);
   }
 
   /**
@@ -4919,12 +4919,7 @@ public class BlockManager implements BlockStatsMXBean {
       LOG.info("Storage policy satisfier is already stopped.");
       return;
     }
-    sps.stop();
-    // TODO: add command to DNs for stop in-progress processing SPS commands?
-    // to avoid confusions in cluster, I think sending commands from centralized
-    // place would be better to drop pending queues at DN. Anyway in progress
-    // work will be finished in a while, but this command can void starting
-    // fresh movements at DN.
+    sps.stop(true);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9d3b544..95cb3a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -213,6 +213,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   private final Queue<BlockStorageMovementInfosBatch> storageMovementBlocks =
       new LinkedList<>();
+  private volatile boolean dropSPSWork = false;
 
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
@@ -1054,4 +1055,21 @@ public class DatanodeDescriptor extends DatanodeInfo {
       return storageMovementBlocks.poll();
     }
   }
+
+  /**
+   * Set whether to drop SPS related queues at DN side.
+   *
+   * @param dropSPSWork
+   *          - true if need to drop SPS queues, otherwise false.
+   */
+  public synchronized void setDropSPSWork(boolean dropSPSWork) {
+    this.dropSPSWork = dropSPSWork;
+  }
+
+  /**
+   * @return true if need to drop SPS queues at DN.
+   */
+  public synchronized boolean shouldDropSPSWork() {
+    return this.dropSPSWork;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 048b68f..51c5aef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1748,6 +1748,13 @@ public class DatanodeManager {
           blkStorageMovementInfosBatch.getBlockMovingInfo()));
     }
 
+    if (nodeinfo.shouldDropSPSWork()) {
+      cmds.add(DropSPSWorkCommand.DNA_DROP_SPS_WORK_COMMAND);
+      // Set back to false to indicate that the new value has been sent to the
+      // datanode.
+      nodeinfo.setDropSPSWork(false);
+    }
+
     if (!cmds.isEmpty()) {
       return cmds.toArray(new DatanodeCommand[cmds.size()]);
     }
@@ -1976,5 +1983,17 @@ public class DatanodeManager {
     return slowDiskTracker != null ?
         slowDiskTracker.getSlowDiskReportAsJsonString() : null;
   }
+
+  /**
+   * Mark all DNs to drop SPS queues. A DNA_DROP_SPS_WORK_COMMAND will be added
+   * in heartbeat response, which will indicate DN to drop SPS queues
+   */
+  public void addDropSPSWorkCommandsToAllDNs() {
+    synchronized (this) {
+      for (DatanodeDescriptor dn : datanodeMap.values()) {
+        dn.setDropSPSWork(true);
+      }
+    }
+  }
 }
 

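Editor's note: the two hunks above implement a one-shot flag per datanode. The NameNode marks every descriptor, and the next heartbeat from each node consumes the mark and turns it into a command. A rough, self-contained sketch of that flow follows; NodeInfo, Manager, and the string command are placeholders, not the HDFS classes.

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  class NodeInfo {
    private boolean dropWork;                    // one-shot flag
    synchronized void setDropWork(boolean v) { dropWork = v; }
    synchronized boolean shouldDropWork()    { return dropWork; }
  }

  class Manager {
    private final Map<String, NodeInfo> nodes = new HashMap<>();

    synchronized void register(String id) { nodes.put(id, new NodeInfo()); }

    // Called when the feature is deactivated: mark every node once.
    synchronized void markAllNodes() {
      for (NodeInfo n : nodes.values()) {
        n.setDropWork(true);
      }
    }

    // Called while building the heartbeat response for one node.
    synchronized List<String> heartbeat(String nodeId) {
      List<String> cmds = new ArrayList<>();
      NodeInfo n = nodes.get(nodeId);
      if (n != null && n.shouldDropWork()) {
        cmds.add("DROP_WORK");     // stands in for DropSPSWorkCommand
        n.setDropWork(false);      // reset so the command is sent only once
      }
      return cmds;
    }
  }

The reset after enqueueing is what keeps the command from being re-sent on every subsequent heartbeat.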
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index c77fe2b..39ff4b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -788,6 +788,10 @@ class BPOfferService {
           blkSPSCmd.getTrackID(), blkSPSCmd.getBlockPoolId(),
           blkSPSCmd.getBlockMovingTasks());
       break;
+    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
+      LOG.info("DatanodeCommand action: DNA_DROP_SPS_WORK_COMMAND");
+      dn.getStoragePolicySatisfyWorker().dropSPSWork();
+      break;
     default:
       LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index bd35b09..e623cef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -146,4 +146,16 @@ public class BlockStorageMovementTracker implements Runnable {
       moverTaskFutures.notify();
     }
   }
+
+  /**
+   * Clear the pending movement and movement result queues.
+   */
+  void removeAll() {
+    synchronized (moverTaskFutures) {
+      moverTaskFutures.clear();
+    }
+    synchronized (movementResults) {
+      movementResults.clear();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 10adbfd..a96ac98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -115,7 +115,6 @@ public class StoragePolicySatisfyWorker {
         TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
         new Daemon.DaemonFactory() {
           private final AtomicInteger threadIndex = new AtomicInteger(0);
-
           @Override
           public Thread newThread(Runnable r) {
             Thread t = super.newThread(r);
@@ -421,10 +420,31 @@ public class StoragePolicySatisfyWorker {
         }
       }
     }
+
+    /**
+     * Clear the trackID vs movement status tracking map.
+     */
+    void removeAll() {
+      synchronized (trackIdVsMovementStatus) {
+        trackIdVsMovementStatus.clear();
+      }
+    }
+
   }
 
   @VisibleForTesting
   BlocksMovementsCompletionHandler getBlocksMovementsCompletionHandler() {
     return handler;
   }
+
+  /**
+   * Drop the in-progress SPS work queues.
+   */
+  public void dropSPSWork() {
+    LOG.info("Received request to drop StoragePolicySatisfierWorker queues. "
+        + "So, none of the SPS Worker queued block movements will"
+        + " be scheduled.");
+    movementTracker.removeAll();
+    handler.removeAll();
+  }
 }

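Editor's note: on the datanode side the drop amounts to clearing the tracking structures before any further movement is scheduled. A simplified sketch of that idea, using plain collections rather than the worker's real fields:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import java.util.concurrent.Future;

  class MovementWorker {
    // trackId -> futures of the per-block moves still in flight
    private final Map<Long, List<Future<Void>>> pendingMoves = new HashMap<>();
    // results waiting to be reported back to the NameNode
    private final List<String> movementResults = new ArrayList<>();

    // Invoked when a drop command arrives in the heartbeat response.
    void dropWork() {
      synchronized (pendingMoves) {
        pendingMoves.clear();       // forget what was being tracked
      }
      synchronized (movementResults) {
        movementResults.clear();    // nothing left to report upstream
      }
      // Transfers already in flight are allowed to finish; only the
      // bookkeeping that would schedule or report further work is dropped.
    }
  }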
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1c48910..dc58294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,9 +99,14 @@ public class StoragePolicySatisfier implements Runnable {
    * Start storage policy satisfier demon thread. Also start block storage
    * movements monitor for retry the attempts if needed.
    */
-  public synchronized void start() {
+  public synchronized void start(boolean reconfigStart) {
     isRunning = true;
-    LOG.info("Starting StoragePolicySatisfier.");
+    if (reconfigStart) {
+      LOG.info("Starting StoragePolicySatisfier, as admin requested to "
+          + "activate it.");
+    } else {
+      LOG.info("Starting StoragePolicySatisfier.");
+    }
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
@@ -110,10 +115,17 @@ public class StoragePolicySatisfier implements Runnable {
 
   /**
    * Stop storage policy satisfier demon thread.
+   *
+   * @param reconfigStop
    */
-  public synchronized void stop() {
+  public synchronized void stop(boolean reconfigStop) {
     isRunning = false;
-    LOG.info("Stopping StoragePolicySatisfier.");
+    if (reconfigStop) {
+      LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
+          + "deactivate it.");
+    } else {
+      LOG.info("Stopping StoragePolicySatisfier.");
+    }
     if (storagePolicySatisfierThread == null) {
       return;
     }
@@ -123,7 +135,10 @@ public class StoragePolicySatisfier implements Runnable {
     } catch (InterruptedException ie) {
     }
     this.storageMovementsMonitor.stop();
-    this.clearQueues();
+    if (reconfigStop) {
+      this.clearQueues();
+      this.blockManager.getDatanodeManager().addDropSPSWorkCommandsToAllDNs();
+    }
   }
 
   /**

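Editor's note: the boolean threaded through start() and stop() above distinguishes an admin-driven reconfiguration from an ordinary lifecycle transition such as failover. A hedged sketch of that split, with placeholder method names rather than the StoragePolicySatisfier internals:

  class SatisfierService {
    private volatile boolean running;
    private Thread worker;

    synchronized void start(boolean reconfigStart) {
      running = true;
      System.out.println(reconfigStart
          ? "starting on admin request" : "starting with the NameNode");
      worker = new Thread(this::run, "satisfier");
      worker.setDaemon(true);
      worker.start();
    }

    synchronized void stop(boolean reconfigStop) {
      running = false;
      if (worker != null) {
        worker.interrupt();
      }
      if (reconfigStop) {
        // Admin turned the feature off: discard queued work and have the
        // datanodes drop theirs via the next heartbeat.
        clearQueues();
        notifyDatanodesToDrop();
      }
      // On a plain shutdown (for example failover) the queues are left in
      // place so the next active NameNode can resume the work.
    }

    private void run() {
      while (running) {
        // scan the namespace and queue block moves; omitted in this sketch
        try { Thread.sleep(1000); } catch (InterruptedException e) { return; }
      }
    }

    private void clearQueues() { }
    private void notifyDatanodesToDrop() { }
  }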
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 858f59b..892efb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -80,6 +80,8 @@ public interface DatanodeProtocol {
   final static int DNA_UNCACHE = 10;   // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
   final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
+  final static int DNA_DROP_SPS_WORK_COMMAND = 13; // block storage movement
+                                                   // command
 
   /** 
    * Register Datanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DropSPSWorkCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DropSPSWorkCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DropSPSWorkCommand.java
new file mode 100644
index 0000000..806f713
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DropSPSWorkCommand.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A DropSPSWorkCommand is an instruction to a datanode to drop the SPSWorker's
+ * pending block storage movement queues.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DropSPSWorkCommand extends DatanodeCommand {
+  public static final DropSPSWorkCommand DNA_DROP_SPS_WORK_COMMAND =
+      new DropSPSWorkCommand();
+
+  public DropSPSWorkCommand() {
+    super(DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 77b0f86..899dc7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -61,6 +61,7 @@ message DatanodeCommandProto {
     BlockIdCommand = 8;
     BlockECReconstructionCommand = 9;
     BlockStorageMovementCommand = 10;
+    DropSPSWorkCommand = 11;
   }
 
   required Type cmdType = 1;    // Type of the command
@@ -76,6 +77,7 @@ message DatanodeCommandProto {
   optional BlockIdCommandProto blkIdCmd = 8;
   optional BlockECReconstructionCommandProto blkECReconstructionCmd = 9;
   optional BlockStorageMovementCommandProto blkStorageMovementCmd = 10;
+  optional DropSPSWorkCommandProto dropSPSWorkCmd = 11;
 }
 
 /**
@@ -166,6 +168,13 @@ message BlockStorageMovementCommandProto {
 }
 
 /**
+ * Instruct datanode to drop SPS work queues
+ */
+message DropSPSWorkCommandProto {
+  // void
+}
+
+/**
  * Block storage movement information
  */
 message BlockStorageMovementProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d8cccf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index 8e02d41..86b8b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.*;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -186,6 +188,63 @@ public class TestStoragePolicySatisfyWorker {
     waitForBlockMovementCompletion(worker, inode.getId(), 1, 30000);
   }
 
+  /**
+   * Tests that drop SPS work method clears all the queues.
+   *
+   * @throws Exception
+   */
+  @Test(timeout = 120000)
+  public void testDropSPSWork() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(20).build();
+
+    cluster.waitActive();
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final String file = "/testDropSPSWork";
+    DFSTestUtil.createFile(dfs, new Path(file), false, 1024, 50 * 100,
+        DEFAULT_BLOCK_SIZE, (short) 2, 0, false, null);
+
+    // move to ARCHIVE
+    dfs.setStoragePolicy(new Path(file), "COLD");
+
+    DataNode src = cluster.getDataNodes().get(2);
+    DatanodeInfo targetDnInfo =
+        DFSTestUtil.getLocalDatanodeInfo(src.getXferPort());
+
+    StoragePolicySatisfyWorker worker =
+        new StoragePolicySatisfyWorker(conf, src);
+    List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+    List<LocatedBlock> locatedBlocks =
+        dfs.getClient().getLocatedBlocks(file, 0).getLocatedBlocks();
+    for (LocatedBlock locatedBlock : locatedBlocks) {
+      BlockMovingInfo blockMovingInfo =
+          prepareBlockMovingInfo(locatedBlock.getBlock().getLocalBlock(),
+              locatedBlock.getLocations()[0], targetDnInfo,
+              locatedBlock.getStorageTypes()[0], StorageType.ARCHIVE);
+      blockMovingInfos.add(blockMovingInfo);
+    }
+    INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
+    worker.processBlockMovingTasks(inode.getId(),
+        cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
+    // Wait till results queue build up
+    waitForBlockMovementResult(worker, inode.getId(), 30000);
+    worker.dropSPSWork();
+    assertTrue(worker.getBlocksMovementsCompletionHandler()
+        .getBlksMovementResults().size() == 0);
+  }
+
+  private void waitForBlockMovementResult(
+      final StoragePolicySatisfyWorker worker, final long inodeId, int timeout)
+          throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        List<BlocksStorageMovementResult> completedBlocks = worker
+            .getBlocksMovementsCompletionHandler().getBlksMovementResults();
+        return completedBlocks.size() > 0;
+      }
+    }, 100, timeout);
+  }
+
   private void waitForBlockMovementCompletion(
       final StoragePolicySatisfyWorker worker, final long inodeId,
       int expectedFailedItemsCount, int timeout) throws Exception {


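Editor's note: the new test waits for an asynchronous result queue to fill before asserting. A framework-free version of that polling helper might look like the sketch below; the Hadoop test uses GenericTestUtils.waitFor, and PollUtil here is only illustrative.

  import java.util.function.Supplier;

  final class PollUtil {
    // Poll `condition` every `intervalMs` until it is true or `timeoutMs`
    // elapses; throw if the condition never becomes true.
    static void waitFor(Supplier<Boolean> condition, long intervalMs,
        long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (Boolean.TRUE.equals(condition.get())) {
          return;
        }
        Thread.sleep(intervalMs);
      }
      throw new AssertionError("condition not met within " + timeoutMs + " ms");
    }
  }

  // Usage, mirroring the shape of waitForBlockMovementResult above:
  //   PollUtil.waitFor(() -> results.size() > 0, 100, 30000);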

[06/50] [abbrv] hadoop git commit: YARN-7087. NM failed to perform log aggregation due to absent container. Contributed by Jason Lowe.

Posted by ra...@apache.org.
YARN-7087. NM failed to perform log aggregation due to absent container. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e864f814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e864f814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e864f814

Branch: refs/heads/HDFS-10285
Commit: e864f81471407a384395fefe1ceb3b66fc7f87f2
Parents: 39a9dc8
Author: Eric Payne <ep...@apache.org>
Authored: Fri Aug 25 15:37:54 2017 -0500
Committer: Eric Payne <ep...@apache.org>
Committed: Fri Aug 25 15:37:54 2017 -0500

----------------------------------------------------------------------
 .../container/ContainerImpl.java                |   2 +-
 .../logaggregation/LogAggregationService.java   |  13 +--
 .../event/LogHandlerContainerFinishedEvent.java |   9 +-
 .../TestLogAggregationService.java              | 105 +++++++++----------
 .../TestNonAggregatingLogHandler.java           |  10 +-
 5 files changed, 69 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e864f814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 8e42133..6af8653 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -703,7 +703,7 @@ public class ContainerImpl implements Container {
     eventHandler.handle(new ContainerStopMonitoringEvent(containerId));
     // Tell the logService too
     eventHandler.handle(new LogHandlerContainerFinishedEvent(
-      containerId, exitCode));
+        containerId, containerTokenIdentifier.getContainerType(), exitCode));
   }
 
   @SuppressWarnings("unchecked") // dispatcher not typed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e864f814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index 1a59e45..4938939 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
@@ -287,7 +286,8 @@ public class LogAggregationService extends AbstractService implements
     return this.appLogAggregators.size();
   }
 
-  private void stopContainer(ContainerId containerId, int exitCode) {
+  private void stopContainer(ContainerId containerId,
+      ContainerType containerType, int exitCode) {
 
     // A container is complete. Put this containers' logs up for aggregation if
     // this containers' logs are needed.
@@ -298,14 +298,6 @@ public class LogAggregationService extends AbstractService implements
           + ", did it fail to start?");
       return;
     }
-    Container container = context.getContainers().get(containerId);
-    if (null == container) {
-      LOG.warn("Log aggregation cannot be started for " + containerId
-          + ", as its an absent container");
-      return;
-    }
-    ContainerType containerType =
-        container.getContainerTokenIdentifier().getContainerType();
     aggregator.startContainerLogAggregation(
         new ContainerLogContext(containerId, containerType, exitCode));
   }
@@ -344,6 +336,7 @@ public class LogAggregationService extends AbstractService implements
         LogHandlerContainerFinishedEvent containerFinishEvent =
             (LogHandlerContainerFinishedEvent) event;
         stopContainer(containerFinishEvent.getContainerId(),
+            containerFinishEvent.getContainerType(),
             containerFinishEvent.getExitCode());
         break;
       case APPLICATION_FINISHED:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e864f814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
index 038006e..3f4b6a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
@@ -19,16 +19,19 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
 
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 public class LogHandlerContainerFinishedEvent extends LogHandlerEvent {
 
   private final ContainerId containerId;
+  private final ContainerType containerType;
   private final int exitCode;
 
   public LogHandlerContainerFinishedEvent(ContainerId containerId,
-      int exitCode) {
+      ContainerType containerType, int exitCode) {
     super(LogHandlerEventType.CONTAINER_FINISHED);
     this.containerId = containerId;
+    this.containerType = containerType;
     this.exitCode = exitCode;
   }
 
@@ -36,6 +39,10 @@ public class LogHandlerContainerFinishedEvent extends LogHandlerEvent {
     return this.containerId;
   }
 
+  public ContainerType getContainerType() {
+    return containerType;
+  }
+
   public int getExitCode() {
     return this.exitCode;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e864f814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 0fa012c..a3e01af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -141,7 +141,6 @@ import org.eclipse.jetty.util.MultiException;
 import com.google.common.base.Supplier;
 import org.slf4j.LoggerFactory;
 
-//@Ignore
 public class TestLogAggregationService extends BaseContainerManagerTest {
 
   private Map<ApplicationAccessType, String> acls = createAppAcls();
@@ -208,13 +207,13 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     ApplicationAttemptId appAttemptId =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container11 = createContainer(appAttemptId, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container11 = ContainerId.newContainerId(appAttemptId, 1);
     // Simulate log-file creation
     writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container11, 0));
+        new LogHandlerContainerFinishedEvent(container11,
+            ContainerType.APPLICATION_MASTER, 0));
 
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         application1));
@@ -332,11 +331,11 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     ApplicationAttemptId appAttemptId =
         BuilderUtils.newApplicationAttemptId(app, 1);
-    ContainerId cont = createContainer(appAttemptId, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId cont = ContainerId.newContainerId(appAttemptId, 1);
     writeContainerLogs(appLogDir, cont, new String[] { "stdout",
         "stderr", "syslog" });
-    logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont, 0));
+    logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont,
+        ContainerType.APPLICATION_MASTER, 0));
     logAggregationService.handle(new LogHandlerAppFinishedEvent(app));
     logAggregationService.stop();
     delSrvc.stop();
@@ -421,13 +420,13 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container11 = createContainer(appAttemptId1, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container11 = ContainerId.newContainerId(appAttemptId1, 1);
 
     // Simulate log-file creation
     writeContainerLogs(app1LogDir, container11, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container11, 0));
+        new LogHandlerContainerFinishedEvent(container11,
+            ContainerType.APPLICATION_MASTER, 0));
 
     ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
     ApplicationAttemptId appAttemptId2 =
@@ -444,19 +443,19 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     logAggregationService.handle(new LogHandlerAppStartedEvent(
         application2, this.user, null, this.acls, contextWithAMOnly));
 
-    ContainerId container21 = createContainer(appAttemptId2, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container21 = ContainerId.newContainerId(appAttemptId2, 1);
 
     writeContainerLogs(app2LogDir, container21, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container21, 0));
+        new LogHandlerContainerFinishedEvent(container21,
+            ContainerType.APPLICATION_MASTER, 0));
 
-    ContainerId container12 = createContainer(appAttemptId1, 2,
-        ContainerType.TASK);
+    ContainerId container12 = ContainerId.newContainerId(appAttemptId1, 2);
 
     writeContainerLogs(app1LogDir, container12, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container12, 0));
+        new LogHandlerContainerFinishedEvent(container12,
+            ContainerType.TASK, 0));
 
     ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
     ApplicationAttemptId appAttemptId3 =
@@ -488,29 +487,29 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
     reset(appEventHandler);
     
-    ContainerId container31 = createContainer(appAttemptId3, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container31 = ContainerId.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container31, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container31, 0));
+        new LogHandlerContainerFinishedEvent(container31,
+            ContainerType.APPLICATION_MASTER, 0));
 
-    ContainerId container32 = createContainer(appAttemptId3, 2,
-        ContainerType.TASK);
+    ContainerId container32 = ContainerId.newContainerId(appAttemptId3, 2);
     writeContainerLogs(app3LogDir, container32, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container32, 1)); // Failed 
+        new LogHandlerContainerFinishedEvent(container32,
+            ContainerType.TASK, 1)); // Failed
 
-    ContainerId container22 = createContainer(appAttemptId2, 2,
-        ContainerType.TASK);
+    ContainerId container22 = ContainerId.newContainerId(appAttemptId2, 2);
     writeContainerLogs(app2LogDir, container22, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container22, 0));
+        new LogHandlerContainerFinishedEvent(container22,
+            ContainerType.TASK, 0));
 
-    ContainerId container33 = createContainer(appAttemptId3, 3,
-        ContainerType.TASK);
+    ContainerId container33 = ContainerId.newContainerId(appAttemptId3, 3);
     writeContainerLogs(app3LogDir, container33, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container33, 0));
+        new LogHandlerContainerFinishedEvent(container33,
+            ContainerType.TASK, 0));
 
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         application2));
@@ -800,7 +799,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
+        BuilderUtils.newContainerId(4, 1, 1, 1),
+        ContainerType.APPLICATION_MASTER, 0));
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         BuilderUtils.newApplicationId(1, 5)));
@@ -866,7 +866,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
+        BuilderUtils.newContainerId(4, 1, 1, 1),
+        ContainerType.APPLICATION_MASTER, 0));
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         BuilderUtils.newApplicationId(1, 5)));
@@ -1528,14 +1529,13 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container1 = createContainer(appAttemptId1, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container1 = ContainerId.newContainerId(appAttemptId1, 1);
 
     // Simulate log-file creation
     writeContainerLogs(appLogDir1, container1, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-      container1, 0));
+        container1, ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application2 has excludePatten which includes
     // stdout and syslog.
@@ -1551,13 +1551,13 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application2,
       this.user, null, this.acls, LogAggregationContextWithExcludePatterns));
-    ContainerId container2 = createContainer(appAttemptId2, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container2 = ContainerId.newContainerId(appAttemptId2, 1);
 
     writeContainerLogs(app2LogDir, container2, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container2, 0));
+        new LogHandlerContainerFinishedEvent(container2,
+            ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application3 has includePattern which is *.log and
     // excludePatten which includes std.log and sys.log.
@@ -1576,12 +1576,12 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application3,
       this.user, null, this.acls, context1));
-    ContainerId container3 = createContainer(appAttemptId3, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container3 = ContainerId.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container3, new String[] { "stdout",
         "sys.log", "std.log", "out.log", "err.log", "log" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container3, 0));
+        new LogHandlerContainerFinishedEvent(container3,
+            ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application4 has includePattern
     // which includes std.log and sys.log and
@@ -1601,12 +1601,12 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application4,
       this.user, null, this.acls, context2));
-    ContainerId container4 = createContainer(appAttemptId4, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container4 = ContainerId.newContainerId(appAttemptId4, 1);
     writeContainerLogs(app4LogDir, container4, new String[] { "stdout",
         "sys.log", "std.log", "out.log", "err.log", "log" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container4, 0));
+        new LogHandlerContainerFinishedEvent(container4,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
     ApplicationEvent expectedInitEvents[] =
@@ -1733,7 +1733,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         new ContainerId[] {container}, logFiles, 1, true);
 
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container, 0));
+        new LogHandlerContainerFinishedEvent(container,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
 
@@ -1857,14 +1858,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(appId, 1);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId1, 2l);
-    try {
-      logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-          containerId, 100));
-      assertTrue("Should skip when null containerID", true);
-    } catch (Exception e) {
-      Assert.assertFalse("Exception not expected should skip null containerid",
-          true);
-    }
+    logAggregationService.handle(new LogHandlerContainerFinishedEvent(
+        containerId, ContainerType.APPLICATION_MASTER, 100));
   }
 
   @Test (timeout = 50000)
@@ -2189,8 +2184,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
       long cId, int exitCode, String[] logFiles) throws IOException {
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId containerId = createContainer(appAttemptId1, cId,
-        containerType);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId1, cId);
     // Simulate log-file creation
     File appLogDir1 =
         new File(localLogDir, application1.toString());
@@ -2198,7 +2192,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     writeContainerLogs(appLogDir1, containerId, logFiles);
 
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        containerId, exitCode));
+        containerId, containerType, exitCode));
     return containerId;
 
   }
@@ -2388,7 +2382,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     writeContainerLogs(appLogDir, container, logFiles3);
 
     logAggregationService.handle(
-      new LogHandlerContainerFinishedEvent(container, 0));
+        new LogHandlerContainerFinishedEvent(container,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(application));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e864f814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
index 7a4ea88..c6fa16d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.InlineDispatcher;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
@@ -156,7 +157,8 @@ public class TestNonAggregatingLogHandler {
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
 
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
 
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
@@ -196,7 +198,8 @@ public class TestNonAggregatingLogHandler {
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
 
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
 
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
@@ -365,7 +368,8 @@ public class TestNonAggregatingLogHandler {
     logHandler.start();
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
     // simulate a restart and verify deletion is rescheduled


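Editor's note: the fix above makes the finished-container event carry the ContainerType itself, so the log handler no longer looks the container up in the NM context, where it may already have been removed. A minimal sketch of that event-carried-state shape, with simplified placeholder classes rather than the YARN ones:

  enum ContainerType { APPLICATION_MASTER, TASK }

  final class ContainerFinishedEvent {
    private final String containerId;
    private final ContainerType containerType;   // carried in the event now
    private final int exitCode;

    ContainerFinishedEvent(String containerId, ContainerType containerType,
        int exitCode) {
      this.containerId = containerId;
      this.containerType = containerType;
      this.exitCode = exitCode;
    }

    String getContainerId()          { return containerId; }
    ContainerType getContainerType() { return containerType; }
    int getExitCode()                { return exitCode; }
  }

  final class LogHandler {
    void onContainerFinished(ContainerFinishedEvent e) {
      // No lookup of a possibly-absent container object is needed here;
      // the type travels with the event that announced the completion.
      System.out.println(e.getContainerId() + " finished as "
          + e.getContainerType() + " with exit code " + e.getExitCode());
    }
  }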

[18/50] [abbrv] hadoop git commit: HDFS-11068: [SPS]: Provide unique trackID to track the block movement sends to coordinator. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-11068: [SPS]: Provide unique trackID to track the block movement sends to coordinator. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7a29eb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7a29eb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7a29eb2

Branch: refs/heads/HDFS-10285
Commit: b7a29eb23f12a7c33ed33a848dbd7255cfb59498
Parents: 054d092
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Fri Nov 11 01:17:50 2016 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:28 2017 +0530

----------------------------------------------------------------------
 .../blockmanagement/DatanodeDescriptor.java     | 31 +++++--
 .../server/blockmanagement/DatanodeManager.java | 16 ++--
 .../BlockStorageMovementInfosBatch.java         | 61 +++++++++++++
 .../server/namenode/StoragePolicySatisfier.java |  5 +-
 .../protocol/BlocksStorageMovementResult.java   |  6 ++
 .../namenode/TestStoragePolicySatisfier.java    | 95 +++++++++++++++-----
 6 files changed, 174 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 1db002b..9d3b544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -41,9 +41,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementInfosBatch;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -207,8 +207,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private final LightWeightHashSet<Block> invalidateBlocks =
       new LightWeightHashSet<>();
 
-  /** A queue of blocks for moving its storage placements by this datanode. */
-  private final Queue<List<BlockMovingInfo>> storageMovementBlocks =
+  /**
+   * A queue of blocks corresponding to trackID for moving its storage
+   * placements by this datanode.
+   */
+  private final Queue<BlockStorageMovementInfosBatch> storageMovementBlocks =
       new LinkedList<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
@@ -1025,18 +1028,30 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Add the block infos which needs to move its storage locations.
    *
+   * @param trackID
+   *          - unique identifier which will be used for tracking the given set
+   *          of blocks movement completion.
    * @param storageMismatchedBlocks
    *          - storage mismatched block infos
    */
-  public void addBlocksToMoveStorage(
+  public void addBlocksToMoveStorage(long trackID,
       List<BlockMovingInfo> storageMismatchedBlocks) {
-    storageMovementBlocks.offer(storageMismatchedBlocks);
+    synchronized (storageMovementBlocks) {
+      storageMovementBlocks.offer(
+          new BlockStorageMovementInfosBatch(trackID, storageMismatchedBlocks));
+    }
   }
 
   /**
-   * @return block infos which needs to move its storage locations.
+   * @return block infos which needs to move its storage locations. This returns
+   *         list of blocks under one trackId.
    */
-  public List<BlockMovingInfo> getBlocksToMoveStorages() {
-    return storageMovementBlocks.poll();
+  public BlockStorageMovementInfosBatch getBlocksToMoveStorages() {
+    synchronized (storageMovementBlocks) {
+      // TODO: Presently returning the list of blocks under one trackId.
+      // Need to limit the list of items into small batches within a trackId
+      // itself if blocks are many (for example, a file contains many blocks).
+      return storageMovementBlocks.poll();
+    }
   }
 }
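
The hunk above replaces the raw per-datanode block list queue with trackID-tagged batches guarded by a synchronized block. Below is a minimal, self-contained sketch of that producer/consumer pattern; the class and field names are simplified stand-ins for the DatanodeDescriptor internals, not the actual Hadoop types.

import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

/** Simplified stand-in for the per-datanode queue of trackID-tagged batches. */
class PendingMovementQueue {

  /** One batch: a trackID plus the block-level work items scheduled under it. */
  static final class Batch {
    final long trackId;
    final List<String> blockMovingInfos; // stand-in for BlockMovingInfo entries

    Batch(long trackId, List<String> blockMovingInfos) {
      this.trackId = trackId;
      this.blockMovingInfos = blockMovingInfos;
    }
  }

  private final Queue<Batch> storageMovementBlocks = new LinkedList<>();

  /** Scheduling side: enqueue one batch of moves under a single trackID. */
  void addBlocksToMoveStorage(long trackId, List<String> blockMovingInfos) {
    synchronized (storageMovementBlocks) {
      storageMovementBlocks.offer(new Batch(trackId, blockMovingInfos));
    }
  }

  /** Heartbeat side: dequeue the next whole batch, or null if nothing is pending. */
  Batch getBlocksToMoveStorages() {
    synchronized (storageMovementBlocks) {
      return storageMovementBlocks.poll();
    }
  }
}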

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 6a5cfbe..048b68f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
 import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementInfosBatch;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -1738,16 +1738,14 @@ public class DatanodeManager {
     }
 
     // check pending block storage movement tasks
-    List<BlockMovingInfo> pendingBlockMovementList = nodeinfo
+    BlockStorageMovementInfosBatch blkStorageMovementInfosBatch = nodeinfo
         .getBlocksToMoveStorages();
-    if (pendingBlockMovementList != null) {
-      // TODO: trackID is used to track the block movement sends to coordinator
-      // datanode. Need to implement tracking logic. Temporarily, using a
-      // constant value -1.
-      long trackID = -1;
+
+    if (blkStorageMovementInfosBatch != null) {
       cmds.add(new BlockStorageMovementCommand(
-          DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT, trackID, blockPoolId,
-          pendingBlockMovementList));
+          DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT,
+          blkStorageMovementInfosBatch.getTrackID(), blockPoolId,
+          blkStorageMovementInfosBatch.getBlockMovingInfo()));
     }
 
     if (!cmds.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
new file mode 100644
index 0000000..a790c13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementInfosBatch.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.List;
+
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+
+/**
+ * This class represents a batch of blocks under one trackId that need to move
+ * their storage locations to satisfy the storage policy.
+ */
+public class BlockStorageMovementInfosBatch {
+  private long trackID;
+  private List<BlockMovingInfo> blockMovingInfos;
+
+  /**
+   * Constructor to create the block storage movement infos batch.
+   *
+   * @param trackID
+   *          - unique identifier which will be used for tracking the given set
+   *          of blocks movement.
+   * @param blockMovingInfos
+   *          - list of block to storage infos.
+   */
+  public BlockStorageMovementInfosBatch(long trackID,
+      List<BlockMovingInfo> blockMovingInfos) {
+    this.trackID = trackID;
+    this.blockMovingInfos = blockMovingInfos;
+  }
+
+  public long getTrackID() {
+    return trackID;
+  }
+
+  public List<BlockMovingInfo> getBlockMovingInfo() {
+    return blockMovingInfos;
+  }
+
+  @Override
+  public String toString() {
+    return new StringBuilder().append("BlockStorageMovementInfosBatch(\n  ")
+        .append("TrackID: ").append(trackID).append("  BlockMovingInfos: ")
+        .append(blockMovingInfos).append(")").toString();
+  }
+}
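
A rough usage sketch of the new class follows, assuming the hadoop-hdfs classes from this branch are on the classpath. The empty list stands in for real BlockMovingInfo entries (their construction is not shown in this hunk), and the BatchUsageSketch class name and the 16386L trackID are purely illustrative.

import java.util.Collections;

import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementInfosBatch;
import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;

public class BatchUsageSketch {
  public static void main(String[] args) {
    // The StoragePolicySatisfier change below uses the block collection (inode)
    // id as the trackID; 16386L is just an example value.
    long trackId = 16386L;
    BlockStorageMovementInfosBatch batch = new BlockStorageMovementInfosBatch(
        trackId, Collections.<BlockMovingInfo>emptyList());

    System.out.println(batch.getTrackID());         // 16386
    System.out.println(batch.getBlockMovingInfo()); // []
    System.out.println(batch);                      // toString() with both fields
  }
}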

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 6fa9302..4967a89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -217,7 +217,10 @@ public class StoragePolicySatisfier implements Runnable {
       // chances, then we can just retry limited number of times and exit.
       return;
     }
-    coordinatorNode.addBlocksToMoveStorage(blockMovingInfos);
+
+    // 'BlockCollectionId' is used as the tracking ID. All the blocks under this
+    // blockCollectionID will be queued on this coordinator datanode.
+    coordinatorNode.addBlocksToMoveStorage(blockCollectionID, blockMovingInfos);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
index 1afba34..713b83b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
@@ -61,4 +61,10 @@ public class BlocksStorageMovementResult {
     return status;
   }
 
+  @Override
+  public String toString() {
+    return new StringBuilder().append("BlocksStorageMovementResult(\n  ")
+        .append("track id: ").append(trackId).append("  status: ")
+        .append(status).append(")").toString();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7a29eb2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 6f5c717..e84052f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -54,15 +56,15 @@ public class TestStoragePolicySatisfier {
   final private int storagesPerDatanode = 2;
   final private long capacity = 2 * 256 * 1024 * 1024;
   final private String file = "/testMoveWhenStoragePolicyNotSatisfying";
-  private DistributedFileSystem distributedFS = null;
+  private DistributedFileSystem dfs = null;
 
   @Before
   public void setUp() throws IOException {
     config.setLong("dfs.block.size", 1024);
     hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
         storagesPerDatanode, capacity);
-    distributedFS = hdfsCluster.getFileSystem();
-    writeContent(distributedFS, file);
+    dfs = hdfsCluster.getFileSystem();
+    writeContent(file);
   }
 
   @Test(timeout = 300000)
@@ -71,7 +73,7 @@ public class TestStoragePolicySatisfier {
 
     try {
       // Change policy to ALL_SSD
-      distributedFS.setStoragePolicy(new Path(file), "COLD");
+      dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -86,8 +88,7 @@ public class TestStoragePolicySatisfier {
 
       hdfsCluster.triggerHeartbeats();
       // Wait till namenode notified about the block location details
-      waitExpectedStorageType(file, StorageType.ARCHIVE, distributedFS, 3,
-          30000);
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -98,7 +99,7 @@ public class TestStoragePolicySatisfier {
       throws Exception {
     try {
       // Change policy to ALL_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ALL_SSD");
+      dfs.setStoragePolicy(new Path(file), "ALL_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -115,7 +116,7 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 3, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 3, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -126,7 +127,7 @@ public class TestStoragePolicySatisfier {
       throws Exception {
     try {
       // Change policy to ONE_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -141,8 +142,8 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
       // Wait till StorgePolicySatisfier Identified that block to move to SSD
       // areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
     } finally {
       hdfsCluster.shutdown();
     }
@@ -156,7 +157,7 @@ public class TestStoragePolicySatisfier {
   public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
     try {
       // Change policy to ONE_SSD
-      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
 
@@ -171,8 +172,8 @@ public class TestStoragePolicySatisfier {
       hdfsCluster.triggerHeartbeats();
 
       // Wait till the block is moved to SSD areas
-      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
-      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
 
       waitForBlocksMovementResult(1, 30000);
     } finally {
@@ -180,7 +181,58 @@ public class TestStoragePolicySatisfier {
     }
   }
 
-  private void waitForBlocksMovementResult(int expectedResultsCount,
+  /**
+   * Tests to verify that multiple files given together to satisfy the storage
+   * policy are all handled correctly.
+   */
+  @Test(timeout = 300000)
+  public void testMultipleFilesForSatisfyStoragePolicy() throws Exception {
+    List<String> files = new ArrayList<>();
+    files.add(file);
+
+    // Creates 4 more files. Send all of them for satisfying the storage policy
+    // together.
+    for (int i = 0; i < 4; i++) {
+      String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
+      files.add(file1);
+      writeContent(file1);
+    }
+
+    try {
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      List<Long> blockCollectionIds = new ArrayList<>();
+      // Change policy to ONE_SSD
+      for (String fileName : files) {
+        dfs.setStoragePolicy(new Path(fileName), "ONE_SSD");
+        INode inode = namesystem.getFSDirectory().getINode(fileName);
+        blockCollectionIds.add(inode.getId());
+      }
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster by adding SSD based
+      // datanodes.
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+      for (long inodeId : blockCollectionIds) {
+        namesystem.getBlockManager().satisfyStoragePolicy(inodeId);
+      }
+      hdfsCluster.triggerHeartbeats();
+
+      for (String fileName : files) {
+        // Wait till the block is moved to SSD areas
+        waitExpectedStorageType(fileName, StorageType.SSD, 1, 30000);
+        waitExpectedStorageType(fileName, StorageType.DISK, 2, 30000);
+      }
+
+      waitForBlocksMovementResult(blockCollectionIds.size(), 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void waitForBlocksMovementResult(long expectedBlkMovResultsCount,
       int timeout) throws TimeoutException, InterruptedException {
     BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
     final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
@@ -188,16 +240,15 @@ public class TestStoragePolicySatisfier {
       @Override
       public Boolean get() {
         LOG.info("expectedResultsCount={} actualResultsCount={}",
-            expectedResultsCount,
+            expectedBlkMovResultsCount,
             sps.getAttemptedItemsMonitor().resultsCount());
-        return expectedResultsCount == sps.getAttemptedItemsMonitor()
-            .resultsCount();
+        return sps.getAttemptedItemsMonitor()
+            .resultsCount() == expectedBlkMovResultsCount;
       }
     }, 100, timeout);
   }
 
-  private void writeContent(final DistributedFileSystem dfs,
-      final String fileName) throws IOException {
+  private void writeContent(final String fileName) throws IOException {
     // write to DISK
     final FSDataOutputStream out = dfs.create(new Path(fileName));
     for (int i = 0; i < 1000; i++) {
@@ -243,8 +294,8 @@ public class TestStoragePolicySatisfier {
   // Check whether the Block movement has been successfully completed to satisfy
   // the storage policy for the given file.
   private void waitExpectedStorageType(final String fileName,
-      final StorageType expectedStorageType, final DistributedFileSystem dfs,
-      int expectedStorageCount, int timeout) throws Exception {
+      final StorageType expectedStorageType, int expectedStorageCount,
+      int timeout) throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more than one coordinator for same file blocks. Contributed by Rakesh R.

Posted by ra...@apache.org.
HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more than one coordinator for same file blocks. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ade0d041
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ade0d041
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ade0d041

Branch: refs/heads/HDFS-10285
Commit: ade0d041d86a11ff05e2d59c140faec2e1c3999e
Parents: 2dac125
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Tue Apr 18 15:23:58 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:36 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +
 .../server/blockmanagement/DatanodeManager.java |  12 ++
 .../hdfs/server/datanode/BPServiceActor.java    |   4 +-
 .../datanode/BlockStorageMovementTracker.java   |  37 +++-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  12 +-
 .../datanode/StoragePolicySatisfyWorker.java    |  95 +++++++++--
 .../BlockStorageMovementAttemptedItems.java     |  80 ++++++---
 .../server/namenode/StoragePolicySatisfier.java |  15 +-
 .../protocol/BlocksStorageMovementResult.java   |   6 +-
 .../src/main/proto/DatanodeProtocol.proto       |   1 +
 .../TestStoragePolicySatisfyWorker.java         |  68 ++++----
 .../TestStoragePolicySatisfierWithHA.java       | 170 +++++++++++++++++--
 13 files changed, 413 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bf93591..d8c53ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -552,7 +552,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
       "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-      30 * 60 * 1000;
+      20 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int     DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 3b38077..5044c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -981,6 +981,9 @@ public class PBHelper {
       case FAILURE:
         status = Status.FAILURE;
         break;
+      case IN_PROGRESS:
+        status = Status.IN_PROGRESS;
+        break;
       default:
         throw new AssertionError("Unknown status: " + resultProto.getStatus());
       }
@@ -1007,6 +1010,9 @@ public class PBHelper {
       case FAILURE:
         status = BlocksStorageMovementResultProto.Status.FAILURE;
         break;
+      case IN_PROGRESS:
+        status = BlocksStorageMovementResultProto.Status.IN_PROGRESS;
+        break;
       default:
         throw new AssertionError("Unknown status: " + report.getStatus());
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 51c5aef..a298843 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1089,6 +1089,18 @@ public class DatanodeManager {
           nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion());
           nodeS.setDisallowed(false); // Node is in the include list
 
+          // Sets dropSPSWork flag to true, to ensure that
+          // DNA_DROP_SPS_WORK_COMMAND will be sent to the datanode via the next
+          // heartbeat response immediately after the node registration. This
+          // avoids a situation where multiple trackId responses come from
+          // different co-ordinator datanodes. After the SPS monitor times out,
+          // it will retry the files that were scheduled to the disconnected DN
+          // (gone longer than the heartbeat expiry) by finding a new
+          // co-ordinator datanode. If the expired datanode then reconnects
+          // after SPS has rescheduled, different movement results would arrive
+          // from both the reconnected and the new co-ordinator DNs.
+          nodeS.setDropSPSWork(true);
+
           // resolve network location
           if(this.rejectUnresolvedTopologyDN) {
             nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));
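
The comment above describes a one-shot flag: registration arms dropSPSWork, and the next heartbeat turns it into a single drop command. A stripped-down sketch of that pattern is below; the names are simplified stand-ins, and since this hunk does not show how the flag is consumed on the heartbeat path, the clear-on-read behaviour here is an assumption.

import java.util.ArrayList;
import java.util.List;

/** Minimal illustration of a "send once on the next heartbeat" flag. */
class SpsDropFlagSketch {

  static class NodeState {
    private boolean dropSPSWork = false;

    synchronized void setDropSPSWork(boolean drop) {
      this.dropSPSWork = drop;
    }

    /** Returns true exactly once after the flag was armed, then clears it. */
    synchronized boolean pollDropSPSWork() {
      boolean drop = dropSPSWork;
      dropSPSWork = false;
      return drop;
    }
  }

  /** Heartbeat handling: emit the drop command only while the flag is armed. */
  static List<String> handleHeartbeat(NodeState node) {
    List<String> cmds = new ArrayList<>();
    if (node.pollDropSPSWork()) {
      cmds.add("DNA_DROP_SPS_WORK_COMMAND"); // stand-in for the real command object
    }
    return cmds;
  }

  public static void main(String[] args) {
    NodeState node = new NodeState();
    node.setDropSPSWork(true);                 // set during (re-)registration
    System.out.println(handleHeartbeat(node)); // [DNA_DROP_SPS_WORK_COMMAND]
    System.out.println(handleHeartbeat(node)); // []
  }
}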

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 0f93fb0..f537f49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -536,7 +536,7 @@ class BPServiceActor implements Runnable {
 
     // Remove the blocks movement results after successfully transferring
     // to namenode.
-    dn.getStoragePolicySatisfyWorker().getBlocksMovementsCompletionHandler()
+    dn.getStoragePolicySatisfyWorker().getBlocksMovementsStatusHandler()
         .remove(blksMovementResults);
 
     return response;
@@ -544,7 +544,7 @@ class BPServiceActor implements Runnable {
 
   private BlocksStorageMovementResult[] getBlocksMovementResults() {
     List<BlocksStorageMovementResult> trackIdVsMovementStatus = dn
-        .getStoragePolicySatisfyWorker().getBlocksMovementsCompletionHandler()
+        .getStoragePolicySatisfyWorker().getBlocksMovementsStatusHandler()
         .getBlksMovementResults();
     BlocksStorageMovementResult[] blksMovementResult =
         new BlocksStorageMovementResult[trackIdVsMovementStatus.size()];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index e623cef..99858bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -28,7 +29,7 @@ import java.util.concurrent.Future;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
-import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsStatusHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,32 +42,34 @@ public class BlockStorageMovementTracker implements Runnable {
   private static final Logger LOG = LoggerFactory
       .getLogger(BlockStorageMovementTracker.class);
   private final CompletionService<BlockMovementResult> moverCompletionService;
-  private final BlocksMovementsCompletionHandler blksMovementscompletionHandler;
+  private final BlocksMovementsStatusHandler blksMovementsStatusHandler;
 
   // Keeps the information - trackID vs its list of blocks
   private final Map<Long, List<Future<BlockMovementResult>>> moverTaskFutures;
   private final Map<Long, List<BlockMovementResult>> movementResults;
 
+  private volatile boolean running = true;
+
   /**
    * BlockStorageMovementTracker constructor.
    *
    * @param moverCompletionService
    *          completion service.
    * @param handler
-   *          blocks movements completion handler
+   *          blocks movements status handler
    */
   public BlockStorageMovementTracker(
       CompletionService<BlockMovementResult> moverCompletionService,
-      BlocksMovementsCompletionHandler handler) {
+      BlocksMovementsStatusHandler handler) {
     this.moverCompletionService = moverCompletionService;
     this.moverTaskFutures = new HashMap<>();
-    this.blksMovementscompletionHandler = handler;
+    this.blksMovementsStatusHandler = handler;
     this.movementResults = new HashMap<>();
   }
 
   @Override
   public void run() {
-    while (true) {
+    while (running) {
       if (moverTaskFutures.size() <= 0) {
         try {
           synchronized (moverTaskFutures) {
@@ -95,8 +98,8 @@ public class BlockStorageMovementTracker implements Runnable {
             synchronized (moverTaskFutures) {
               moverTaskFutures.remove(trackId);
             }
-            // handle completed blocks movements per trackId.
-            blksMovementscompletionHandler.handle(resultPerTrackIdList);
+            // handle completed or inprogress blocks movements per trackId.
+            blksMovementsStatusHandler.handle(resultPerTrackIdList);
             movementResults.remove(trackId);
           }
         }
@@ -158,4 +161,22 @@ public class BlockStorageMovementTracker implements Runnable {
       movementResults.clear();
     }
   }
+
+  /**
+   * @return the list of trackIds which are still waiting to complete all the
+   *         scheduled blocks movements.
+   */
+  Set<Long> getInProgressTrackIds() {
+    synchronized (moverTaskFutures) {
+      return moverTaskFutures.keySet();
+    }
+  }
+
+  /**
+   * Sets running flag to false and clears the pending movement result queues.
+   */
+  public void stopTracking() {
+    running = false;
+    removeAll();
+  }
 }
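
The tracker now runs until a volatile running flag is cleared instead of looping forever, with stopTracking() and an interrupt used to shut it down. A compact, self-contained sketch of such a stoppable CompletionService consumer (generic Long payload and simplified error handling, not the actual BlockStorageMovementTracker):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/** Consumer thread that drains a CompletionService until asked to stop. */
class StoppableTrackerSketch implements Runnable {
  private final CompletionService<Long> completionService;
  private volatile boolean running = true;

  StoppableTrackerSketch(CompletionService<Long> completionService) {
    this.completionService = completionService;
  }

  @Override
  public void run() {
    while (running) {
      try {
        Future<Long> done = completionService.take(); // blocks until a task finishes
        System.out.println("movement finished for trackId " + done.get());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // interrupt arrives from the stop path
        return;
      } catch (Exception e) {
        // a real tracker would record the failure against its trackId
      }
    }
  }

  /** Mirrors stopTracking(): clears the flag so the loop exits after interrupt. */
  void stopTracking() {
    running = false;
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletionService<Long> cs = new ExecutorCompletionService<>(pool);
    StoppableTrackerSketch tracker = new StoppableTrackerSketch(cs);
    Thread trackerThread = new Thread(tracker, "tracker");
    trackerThread.start();

    cs.submit(() -> 100L); // pretend the moves for trackId 100 completed

    Thread.sleep(200);
    tracker.stopTracking();
    trackerThread.interrupt();
    trackerThread.join(3000);
    pool.shutdownNow();
  }
}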

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5a8ad68..6c71785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1423,6 +1423,7 @@ public class DataNode extends ReconfigurableBase
     blockRecoveryWorker = new BlockRecoveryWorker(this);
     storagePolicySatisfyWorker =
         new StoragePolicySatisfyWorker(getConf(), this);
+    storagePolicySatisfyWorker.start();
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(getConf());
@@ -1968,7 +1969,11 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-    
+
+    // stop storagePolicySatisfyWorker
+    if (storagePolicySatisfyWorker != null) {
+      storagePolicySatisfyWorker.stop();
+    }
     List<BPOfferService> bposArray = (this.blockPoolManager == null)
         ? new ArrayList<BPOfferService>()
         : this.blockPoolManager.getAllNamenodeThreads();
@@ -2117,6 +2122,11 @@ public class DataNode extends ReconfigurableBase
       notifyAll();
     }
     tracer.close();
+
+    // Waiting to finish SPS worker thread.
+    if (storagePolicySatisfyWorker != null) {
+      storagePolicySatisfyWorker.waitToFinishWorkerThread();
+    }
   }
 
   /**
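
The DataNode wiring above follows a common daemon lifecycle: start the worker during startup, interrupt and stop it during shutdown, then wait a bounded time for the thread to die so shutdown cannot hang. A small sketch of that sequence with a hypothetical Worker class (the 3-second join mirrors waitToFinishWorkerThread() above):

/** Minimal daemon lifecycle mirroring start()/stop()/waitToFinishWorkerThread(). */
class WorkerLifecycleSketch {

  static class Worker implements Runnable {
    private volatile boolean running = true;
    private Thread thread;

    void start() {
      thread = new Thread(this, "sps-worker");
      thread.setDaemon(true);
      thread.start();
    }

    void stop() {
      running = false;
      thread.interrupt();
    }

    /** Bounded wait so a stuck worker cannot block process exit. */
    void waitToFinishWorkerThread() {
      try {
        thread.join(3000);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }

    @Override
    public void run() {
      while (running) {
        try {
          Thread.sleep(1000); // placeholder for real block movement work
        } catch (InterruptedException e) {
          return;             // woken up by stop()
        }
      }
    }
  }

  public static void main(String[] args) {
    Worker worker = new Worker();
    worker.start();                    // startup path
    worker.stop();                     // shutdown path
    worker.waitToFinishWorkerThread(); // final bounded wait
  }
}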

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index a96ac98..f4f97dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -31,7 +32,9 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
@@ -87,10 +90,13 @@ public class StoragePolicySatisfyWorker {
   private final int moverThreads;
   private final ExecutorService moveExecutor;
   private final CompletionService<BlockMovementResult> moverCompletionService;
-  private final BlocksMovementsCompletionHandler handler;
+  private final BlocksMovementsStatusHandler handler;
   private final BlockStorageMovementTracker movementTracker;
   private Daemon movementTrackerThread;
 
+  private long inprogressTrackIdsCheckInterval = 30 * 1000; // 30seconds.
+  private long nextInprogressRecheckTime;
+
   public StoragePolicySatisfyWorker(Configuration conf, DataNode datanode) {
     this.datanode = datanode;
     this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
@@ -99,15 +105,52 @@ public class StoragePolicySatisfyWorker {
         DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
     moveExecutor = initializeBlockMoverThreadPool(moverThreads);
     moverCompletionService = new ExecutorCompletionService<>(moveExecutor);
-    handler = new BlocksMovementsCompletionHandler();
+    handler = new BlocksMovementsStatusHandler();
     movementTracker = new BlockStorageMovementTracker(moverCompletionService,
         handler);
     movementTrackerThread = new Daemon(movementTracker);
     movementTrackerThread.setName("BlockStorageMovementTracker");
-    movementTrackerThread.start();
+
+    // Interval at which the in-progress trackIds are checked. The time
+    // interval is proportional to the heartbeat interval.
+    final long heartbeatIntervalSeconds = conf.getTimeDuration(
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
+    inprogressTrackIdsCheckInterval = 5 * heartbeatIntervalSeconds;
+    // update first inprogress recheck time to a future time stamp.
+    nextInprogressRecheckTime = monotonicNow()
+        + inprogressTrackIdsCheckInterval;
+
     // TODO: Needs to manage the number of concurrent moves per DataNode.
   }
 
+  /**
+   * Start StoragePolicySatisfyWorker, which will start block movement tracker
+   * thread to track the completion of block movements.
+   */
+  void start() {
+    movementTrackerThread.start();
+  }
+
+  /**
+   * Stop StoragePolicySatisfyWorker, which will stop block movement tracker
+   * thread.
+   */
+  void stop() {
+    movementTrackerThread.interrupt();
+    movementTracker.stopTracking();
+  }
+
+  /**
+   * Timed wait to stop BlockStorageMovement tracker daemon thread.
+   */
+  void waitToFinishWorkerThread() {
+    try {
+      movementTrackerThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+  }
+
   private ThreadPoolExecutor initializeBlockMoverThreadPool(int num) {
     LOG.debug("Block mover to satisfy storage policy; pool threads={}", num);
 
@@ -352,11 +395,11 @@ public class StoragePolicySatisfyWorker {
   }
 
   /**
-   * Blocks movements completion handler, which is used to collect details of
-   * the completed list of block movements and this status(success or failure)
-   * will be send to the namenode via heartbeat.
+   * Blocks movements status handler, which is used to collect details of the
+   * completed or in-progress block movements; this status (success, failure or
+   * in progress) will be sent to the namenode via heartbeat.
    */
-  static class BlocksMovementsCompletionHandler {
+  class BlocksMovementsStatusHandler {
     private final List<BlocksStorageMovementResult> trackIdVsMovementStatus =
         new ArrayList<>();
 
@@ -395,14 +438,21 @@ public class StoragePolicySatisfyWorker {
      * @return unmodifiable list of blocks storage movement results.
      */
     List<BlocksStorageMovementResult> getBlksMovementResults() {
+      List<BlocksStorageMovementResult> movementResults = new ArrayList<>();
+      // 1. Adding all the completed trackids.
       synchronized (trackIdVsMovementStatus) {
-        if (trackIdVsMovementStatus.size() <= 0) {
-          return new ArrayList<>();
+        if (trackIdVsMovementStatus.size() > 0) {
+          movementResults = Collections
+              .unmodifiableList(trackIdVsMovementStatus);
         }
-        List<BlocksStorageMovementResult> results = Collections
-            .unmodifiableList(trackIdVsMovementStatus);
-        return results;
       }
+      // 2. Adding the in progress track ids after those which are completed.
+      Set<Long> inProgressTrackIds = getInProgressTrackIds();
+      for (Long trackId : inProgressTrackIds) {
+        movementResults.add(new BlocksStorageMovementResult(trackId,
+            BlocksStorageMovementResult.Status.IN_PROGRESS));
+      }
+      return movementResults;
     }
 
     /**
@@ -433,7 +483,7 @@ public class StoragePolicySatisfyWorker {
   }
 
   @VisibleForTesting
-  BlocksMovementsCompletionHandler getBlocksMovementsCompletionHandler() {
+  BlocksMovementsStatusHandler getBlocksMovementsStatusHandler() {
     return handler;
   }
 
@@ -447,4 +497,23 @@ public class StoragePolicySatisfyWorker {
     movementTracker.removeAll();
     handler.removeAll();
   }
+
+  /**
+   * Gets list of trackids which are inprogress. Will do collection periodically
+   * on 'dfs.datanode.storage.policy.satisfier.worker.inprogress.recheck.time.
+   * millis' interval.
+   *
+   * @return collection of trackids which are inprogress
+   */
+  private Set<Long> getInProgressTrackIds() {
+    Set<Long> trackIds = new HashSet<>();
+    long now = monotonicNow();
+    if (nextInprogressRecheckTime >= now) {
+      trackIds = movementTracker.getInProgressTrackIds();
+
+      // schedule next re-check interval
+      nextInprogressRecheckTime = now + inprogressTrackIdsCheckInterval;
+    }
+    return trackIds;
+  }
 }
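
The status handler above now reports two kinds of results in one heartbeat payload: finished trackIds collected from the handler, plus still-running trackIds pulled from the tracker on a periodic recheck. A rough, self-contained sketch of the general idea follows; the names are simplified stand-ins, and the copy-then-append step and the "now >= nextRecheckTime" condition are the conventional forms used here for illustration rather than a line-for-line copy of the patch.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Sketch of merging completed and in-progress movement results for a heartbeat. */
class MovementResultsSketch {

  enum Status { SUCCESS, FAILURE, IN_PROGRESS }

  static final class Result {
    final long trackId;
    final Status status;

    Result(long trackId, Status status) {
      this.trackId = trackId;
      this.status = status;
    }

    @Override
    public String toString() {
      return trackId + "=" + status;
    }
  }

  private final List<Result> completed = new ArrayList<>();
  private final Set<Long> inProgressTrackIds = new HashSet<>();

  private final long recheckIntervalMillis = 5 * 3 * 1000L; // e.g. 5 x a 3s heartbeat
  private long nextRecheckTime = System.currentTimeMillis() + recheckIntervalMillis;

  void markStarted(long trackId) {
    inProgressTrackIds.add(trackId);
  }

  void markCompleted(long trackId, Status status) {
    synchronized (completed) {
      completed.add(new Result(trackId, status));
    }
  }

  List<Result> getResultsForHeartbeat() {
    // 1. Copy the completed results into a fresh, modifiable list.
    List<Result> payload;
    synchronized (completed) {
      payload = new ArrayList<>(completed);
    }
    // 2. Append IN_PROGRESS entries, but only once per recheck interval.
    long now = System.currentTimeMillis();
    if (now >= nextRecheckTime) {
      for (Long trackId : inProgressTrackIds) {
        payload.add(new Result(trackId, Status.IN_PROGRESS));
      }
      nextRecheckTime = now + recheckIntervalMillis;
    }
    return payload;
  }
}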

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 26b98d8..f2406da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult.Status;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,9 +40,11 @@ import com.google.common.annotations.VisibleForTesting;
  * A monitor class for checking whether block storage movements finished or not.
  * If block storage movement results from datanode indicates about the movement
  * success, then it will just remove the entries from tracking. If it reports
- * failure, then it will add back to needed block storage movements list. If no
- * DN reports about movement for longer time, then such items will be retries
- * automatically after timeout. The default timeout would be 30mins.
+ * failure, then it will add back to needed block storage movements list. If it
+ * reports in_progress, the block movement is still in progress and the
+ * coordinator is still tracking it. If no DN reports about a movement for a
+ * long time, then such items will be retried automatically after timeout.
+ * The default timeout would be 30mins.
  */
 public class BlockStorageMovementAttemptedItems {
   private static final Logger LOG =
@@ -57,10 +60,10 @@ public class BlockStorageMovementAttemptedItems {
   private Daemon timerThread = null;
   private final StoragePolicySatisfier sps;
   //
-  // It might take anywhere between 30 to 60 minutes before
+  // It might take anywhere between 20 to 60 minutes before
   // a request is timed out.
   //
-  private long selfRetryTimeout = 30 * 60 * 1000;
+  private long selfRetryTimeout = 20 * 60 * 1000;
 
   //
   // It might take anywhere between 5 to 10 minutes before
@@ -159,35 +162,35 @@ public class BlockStorageMovementAttemptedItems {
 
   /**
    * This class contains information of an attempted trackID. Information such
-   * as, (a)last attempted time stamp, (b)whether all the blocks in the trackID
-   * were attempted and blocks movement has been scheduled to satisfy storage
-   * policy. This is used by
+   * as, (a)last attempted or reported time stamp, (b)whether all the blocks in
+   * the trackID were attempted and blocks movement has been scheduled to
+   * satisfy storage policy. This is used by
    * {@link BlockStorageMovementAttemptedItems#storageMovementAttemptedItems}.
    */
   private final static class ItemInfo {
-    private final long lastAttemptedTimeStamp;
+    private long lastAttemptedOrReportedTime;
     private final boolean allBlockLocsAttemptedToSatisfy;
 
     /**
      * ItemInfo constructor.
      *
-     * @param lastAttemptedTimeStamp
-     *          last attempted time stamp
+     * @param lastAttemptedOrReportedTime
+     *          last attempted or reported time
      * @param allBlockLocsAttemptedToSatisfy
      *          whether all the blocks in the trackID were attempted and blocks
      *          movement has been scheduled to satisfy storage policy
      */
-    private ItemInfo(long lastAttemptedTimeStamp,
+    private ItemInfo(long lastAttemptedOrReportedTime,
         boolean allBlockLocsAttemptedToSatisfy) {
-      this.lastAttemptedTimeStamp = lastAttemptedTimeStamp;
+      this.lastAttemptedOrReportedTime = lastAttemptedOrReportedTime;
       this.allBlockLocsAttemptedToSatisfy = allBlockLocsAttemptedToSatisfy;
     }
 
     /**
-     * @return last attempted time stamp.
+     * @return last attempted or reported time stamp.
      */
-    private long getLastAttemptedTimeStamp() {
-      return lastAttemptedTimeStamp;
+    private long getLastAttemptedOrReportedTime() {
+      return lastAttemptedOrReportedTime;
     }
 
     /**
@@ -200,6 +203,14 @@ public class BlockStorageMovementAttemptedItems {
     private boolean isAllBlockLocsAttemptedToSatisfy() {
       return allBlockLocsAttemptedToSatisfy;
     }
+
+    /**
+     * Update lastAttemptedOrReportedTime, so that the expiration time will be
+     * postponed into the future.
+     */
+    private void touchLastReportedTimeStamp() {
+      this.lastAttemptedOrReportedTime = monotonicNow();
+    }
   }
 
   /**
@@ -234,7 +245,8 @@ public class BlockStorageMovementAttemptedItems {
       while (iter.hasNext()) {
         Entry<Long, ItemInfo> entry = iter.next();
         ItemInfo itemInfo = entry.getValue();
-        if (now > itemInfo.getLastAttemptedTimeStamp() + selfRetryTimeout) {
+        if (now > itemInfo.getLastAttemptedOrReportedTime()
+            + selfRetryTimeout) {
           Long blockCollectionID = entry.getKey();
           synchronized (storageMovementAttemptedResults) {
             if (!isExistInResult(blockCollectionID)) {
@@ -273,6 +285,7 @@ public class BlockStorageMovementAttemptedItems {
       Iterator<BlocksStorageMovementResult> resultsIter =
           storageMovementAttemptedResults.iterator();
       while (resultsIter.hasNext()) {
+        boolean isInprogress = false;
         // TrackID need to be retried in the following cases:
         // 1) All or few scheduled block(s) movement has been failed.
         // 2) All the scheduled block(s) movement has been succeeded but there
@@ -282,16 +295,19 @@ public class BlockStorageMovementAttemptedItems {
         BlocksStorageMovementResult storageMovementAttemptedResult = resultsIter
             .next();
         synchronized (storageMovementAttemptedItems) {
-          if (storageMovementAttemptedResult
-              .getStatus() == BlocksStorageMovementResult.Status.FAILURE) {
+          Status status = storageMovementAttemptedResult.getStatus();
+          ItemInfo itemInfo;
+          switch (status) {
+          case FAILURE:
             blockStorageMovementNeeded
                 .add(storageMovementAttemptedResult.getTrackId());
             LOG.warn("Blocks storage movement results for the tracking id: {}"
                 + " is reported from co-ordinating datanode, but result"
                 + " status is FAILURE. So, added for retry",
                 storageMovementAttemptedResult.getTrackId());
-          } else {
-            ItemInfo itemInfo = storageMovementAttemptedItems
+            break;
+          case SUCCESS:
+            itemInfo = storageMovementAttemptedItems
                 .get(storageMovementAttemptedResult.getTrackId());
 
             // ItemInfo could be null. One case is, before the blocks movements
@@ -320,10 +336,26 @@ public class BlockStorageMovementAttemptedItems {
               this.sps.notifyBlkStorageMovementFinished(
                   storageMovementAttemptedResult.getTrackId());
             }
+            break;
+          case IN_PROGRESS:
+            isInprogress = true;
+            itemInfo = storageMovementAttemptedItems
+                .get(storageMovementAttemptedResult.getTrackId());
+            if(itemInfo != null){
+              // update the attempted expiration time to next cycle.
+              itemInfo.touchLastReportedTimeStamp();
+            }
+            break;
+          default:
+            LOG.error("Unknown status: {}", status);
+            break;
+          }
+          // Remove trackID from the attempted list once the attempt has
+          // completed (success or failure).
+          if (!isInprogress) {
+            storageMovementAttemptedItems
+                .remove(storageMovementAttemptedResult.getTrackId());
           }
-          // Remove trackID from the attempted list, if any.
-          storageMovementAttemptedItems
-              .remove(storageMovementAttemptedResult.getTrackId());
         }
         // Remove trackID from results as processed above.
         resultsIter.remove();
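
The monitor change above turns the old success/failure branch into a three-way dispatch: FAILURE re-queues the trackID, SUCCESS drops it (and notifies the satisfier), and IN_PROGRESS only refreshes its timestamp so the self-retry timeout keeps being pushed out. A condensed sketch of that dispatch with stand-in types and no Hadoop dependencies:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;

/** Sketch of how reported movement results drive the attempted-items monitor. */
class AttemptedItemsSketch {

  enum Status { SUCCESS, FAILURE, IN_PROGRESS }

  static final class ItemInfo {
    long lastAttemptedOrReportedTime = System.currentTimeMillis();

    void touch() {
      lastAttemptedOrReportedTime = System.currentTimeMillis();
    }
  }

  private final Map<Long, ItemInfo> attemptedItems = new HashMap<>();
  private final Queue<Long> blockStorageMovementNeeded = new LinkedList<>();

  void onResult(long trackId, Status status) {
    synchronized (attemptedItems) {
      boolean inProgress = false;
      switch (status) {
      case FAILURE:
        blockStorageMovementNeeded.add(trackId); // schedule a retry
        break;
      case SUCCESS:
        // a real monitor would also notify the satisfier that this id finished
        break;
      case IN_PROGRESS:
        inProgress = true;
        ItemInfo info = attemptedItems.get(trackId);
        if (info != null) {
          info.touch(); // push the self-retry timeout into the future
        }
        break;
      default:
        break;
      }
      if (!inProgress) {
        attemptedItems.remove(trackId); // only terminal results drop the entry
      }
    }
  }
}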

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 8cf9920..8be0a2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -108,6 +108,11 @@ public class StoragePolicySatisfier implements Runnable {
     } else {
       LOG.info("Starting StoragePolicySatisfier.");
     }
+
+    // Ensure that all the previously submitted block movements (if any) are
+    // stopped on all datanodes.
+    addDropSPSWorkCommandsToAllDNs();
+
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
@@ -133,7 +138,7 @@ public class StoragePolicySatisfier implements Runnable {
       LOG.info("Stopping StoragePolicySatisfier, as admin requested to "
           + "deactivate it.");
       this.clearQueuesWithNotification();
-      this.blockManager.getDatanodeManager().addDropSPSWorkCommandsToAllDNs();
+      addDropSPSWorkCommandsToAllDNs();
     } else {
       LOG.info("Stopping StoragePolicySatisfier.");
     }
@@ -170,6 +175,14 @@ public class StoragePolicySatisfier implements Runnable {
     return namesystem.isFileOpenedForWrite(moverId);
   }
 
+  /**
+   * Adds drop commands to all datanodes to stop performing the satisfier
+   * block movements, if any.
+   */
+  private void addDropSPSWorkCommandsToAllDNs() {
+    this.blockManager.getDatanodeManager().addDropSPSWorkCommandsToAllDNs();
+  }
+
   @Override
   public void run() {
     boolean isMoverRunning = !checkIfMoverRunning();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
index 713b83b..b484eb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
@@ -35,9 +35,13 @@ public class BlocksStorageMovementResult {
    * retry these failed blocks movements. Example selected target node is no
    * more running or no space. So, retrying by selecting new target node might
    * work.
+   *
+   * <p>
+   * IN_PROGRESS - If all or some of the blocks associated with the track id
+   * are still moving.
    */
   public static enum Status {
-    SUCCESS, FAILURE;
+    SUCCESS, FAILURE, IN_PROGRESS;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 899dc7e..080f7fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -192,6 +192,7 @@ message BlocksStorageMovementResultProto {
   enum Status {
     SUCCESS = 1; // block movement succeeded
     FAILURE = 2; // block movement failed and needs to retry
+    IN_PROGRESS = 3; // block movement is still in progress
   }
   required uint64 trackID = 1;
   required Status status = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index 86b8b50..8fbbf33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -176,16 +176,21 @@ public class TestStoragePolicySatisfyWorker {
 
     StoragePolicySatisfyWorker worker = new StoragePolicySatisfyWorker(conf,
         src);
-    List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
-    BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
-        lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
-        lb.getStorageTypes()[0], StorageType.ARCHIVE);
-    blockMovingInfos.add(blockMovingInfo);
-    INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
-    worker.processBlockMovingTasks(inode.getId(),
-        cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
-
-    waitForBlockMovementCompletion(worker, inode.getId(), 1, 30000);
+    try {
+      worker.start();
+      List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+      BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
+          lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
+          lb.getStorageTypes()[0], StorageType.ARCHIVE);
+      blockMovingInfos.add(blockMovingInfo);
+      INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
+      worker.processBlockMovingTasks(inode.getId(),
+          cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
+
+      waitForBlockMovementCompletion(worker, inode.getId(), 1, 30000);
+    } finally {
+      worker.stop();
+    }
   }
 
   /**
@@ -212,24 +217,29 @@ public class TestStoragePolicySatisfyWorker {
 
     StoragePolicySatisfyWorker worker =
         new StoragePolicySatisfyWorker(conf, src);
-    List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
-    List<LocatedBlock> locatedBlocks =
-        dfs.getClient().getLocatedBlocks(file, 0).getLocatedBlocks();
-    for (LocatedBlock locatedBlock : locatedBlocks) {
-      BlockMovingInfo blockMovingInfo =
-          prepareBlockMovingInfo(locatedBlock.getBlock().getLocalBlock(),
-              locatedBlock.getLocations()[0], targetDnInfo,
-              locatedBlock.getStorageTypes()[0], StorageType.ARCHIVE);
-      blockMovingInfos.add(blockMovingInfo);
+    worker.start();
+    try {
+      List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
+      List<LocatedBlock> locatedBlocks =
+          dfs.getClient().getLocatedBlocks(file, 0).getLocatedBlocks();
+      for (LocatedBlock locatedBlock : locatedBlocks) {
+        BlockMovingInfo blockMovingInfo =
+            prepareBlockMovingInfo(locatedBlock.getBlock().getLocalBlock(),
+                locatedBlock.getLocations()[0], targetDnInfo,
+                locatedBlock.getStorageTypes()[0], StorageType.ARCHIVE);
+        blockMovingInfos.add(blockMovingInfo);
+      }
+      INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
+      worker.processBlockMovingTasks(inode.getId(),
+          cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
+      // Wait till results queue build up
+      waitForBlockMovementResult(worker, inode.getId(), 30000);
+      worker.dropSPSWork();
+      assertTrue(worker.getBlocksMovementsStatusHandler()
+          .getBlksMovementResults().size() == 0);
+    } finally {
+      worker.stop();
     }
-    INode inode = cluster.getNamesystem().getFSDirectory().getINode(file);
-    worker.processBlockMovingTasks(inode.getId(),
-        cluster.getNamesystem().getBlockPoolId(), blockMovingInfos);
-    // Wait till results queue build up
-    waitForBlockMovementResult(worker, inode.getId(), 30000);
-    worker.dropSPSWork();
-    assertTrue(worker.getBlocksMovementsCompletionHandler()
-        .getBlksMovementResults().size() == 0);
   }
 
   private void waitForBlockMovementResult(
@@ -239,7 +249,7 @@ public class TestStoragePolicySatisfyWorker {
       @Override
       public Boolean get() {
         List<BlocksStorageMovementResult> completedBlocks = worker
-            .getBlocksMovementsCompletionHandler().getBlksMovementResults();
+            .getBlocksMovementsStatusHandler().getBlksMovementResults();
         return completedBlocks.size() > 0;
       }
     }, 100, timeout);
@@ -252,7 +262,7 @@ public class TestStoragePolicySatisfyWorker {
       @Override
       public Boolean get() {
         List<BlocksStorageMovementResult> completedBlocks = worker
-            .getBlocksMovementsCompletionHandler().getBlksMovementResults();
+            .getBlocksMovementsStatusHandler().getBlksMovementResults();
         int failedCount = 0;
         for (BlocksStorageMovementResult blkMovementResult : completedBlocks) {
           if (blkMovementResult.getStatus() ==

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ade0d041/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
index 4d226ff..c88d5be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
@@ -17,51 +17,90 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
-
-import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests that StoragePolicySatisfier is able to work with HA enabled.
  */
 public class TestStoragePolicySatisfierWithHA {
   private MiniDFSCluster cluster = null;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestStoragePolicySatisfierWithHA.class);
 
-  @Before
-  public void setUp() throws IOException {
-    Configuration conf = new Configuration();
+  private final Configuration config = new HdfsConfiguration();
+  private static final int DEFAULT_BLOCK_SIZE = 1024;
+  private DistributedFileSystem dfs = null;
+
+  private StorageType[][] allDiskTypes =
+      new StorageType[][]{{StorageType.DISK, StorageType.DISK},
+          {StorageType.DISK, StorageType.DISK},
+          {StorageType.DISK, StorageType.DISK}};
+  private int numOfDatanodes = 3;
+  private int storagesPerDatanode = 2;
+  private long capacity = 2 * 256 * 1024 * 1024;
+  private int nnIndex = 0;
+
+  private void createCluster() throws IOException {
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    startCluster(config, allDiskTypes, numOfDatanodes, storagesPerDatanode,
+        capacity);
+    dfs = cluster.getFileSystem(nnIndex);
+  }
+
+  private void startCluster(final Configuration conf,
+      StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
+      long nodeCapacity) throws IOException {
+    long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
+    for (int i = 0; i < numberOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
+      }
+    }
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
-        .numDataNodes(1)
-        .build();
+        .numDataNodes(numberOfDatanodes).storagesPerDatanode(storagesPerDn)
+        .storageTypes(storageTypes).storageCapacities(capacities).build();
+    cluster.waitActive();
+    cluster.transitionToActive(0);
   }
 
   /**
    * Tests to verify that SPS should run/stop automatically when NN state
    * changes between Standby and Active.
    */
-  @Test(timeout = 100000)
+  @Test(timeout = 90000)
   public void testWhenNNHAStateChanges() throws IOException {
     try {
-      DistributedFileSystem fs;
+      createCluster();
       boolean running;
 
-      cluster.waitActive();
-      fs = cluster.getFileSystem(0);
+      dfs = cluster.getFileSystem(1);
 
       try {
-        fs.getClient().isStoragePolicySatisfierRunning();
+        dfs.getClient().isStoragePolicySatisfierRunning();
         Assert.fail("Call this function to Standby NN should "
             + "raise an exception.");
       } catch (RemoteException e) {
@@ -72,14 +111,15 @@ public class TestStoragePolicySatisfierWithHA {
       }
 
       cluster.transitionToActive(0);
-      running = fs.getClient().isStoragePolicySatisfierRunning();
+      dfs = cluster.getFileSystem(0);
+      running = dfs.getClient().isStoragePolicySatisfierRunning();
       Assert.assertTrue("StoragePolicySatisfier should be active "
           + "when NN transits from Standby to Active mode.", running);
 
       // NN transits from Active to Standby
       cluster.transitionToStandby(0);
       try {
-        fs.getClient().isStoragePolicySatisfierRunning();
+        dfs.getClient().isStoragePolicySatisfierRunning();
         Assert.fail("NN in Standby again, call this function should "
             + "raise an exception.");
       } catch (RemoteException e) {
@@ -106,4 +146,104 @@ public class TestStoragePolicySatisfierWithHA {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test to verify that a namenode switchover adds
+   * DNA_DROP_SPS_WORK_COMMAND to all the datanodes. Later, this ensures that
+   * all the SPS queues at the datanodes are dropped.
+   */
+  @Test(timeout = 90000)
+  public void testNamenodeSwitchoverShouldDropSPSWork() throws Exception {
+    try {
+      createCluster();
+
+      FSNamesystem fsn = cluster.getNamesystem(0);
+      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+      List<DatanodeDescriptor> listOfDns = new ArrayList<>();
+      for (DataNode dn : dataNodes) {
+        DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn,
+            dn.getDatanodeId());
+        listOfDns.add(dnd);
+      }
+      cluster.shutdownDataNodes();
+
+      cluster.transitionToStandby(0);
+      LOG.info("**Transition to Active**");
+      cluster.transitionToActive(1);
+
+      // Verify that Standby-to-Active transition should set drop SPS flag to
+      // true. This will ensure that DNA_DROP_SPS_WORK_COMMAND will be
+      // propagated to datanode during heartbeat response.
+      int retries = 20;
+      boolean dropSPSWork = false;
+      while (retries > 0) {
+        for (DatanodeDescriptor dnd : listOfDns) {
+          dropSPSWork = dnd.shouldDropSPSWork();
+          if (!dropSPSWork) {
+            retries--;
+            Thread.sleep(250);
+            break;
+          }
+        }
+        if (dropSPSWork) {
+          break;
+        }
+      }
+      Assert.assertTrue("Didn't drop SPS work", dropSPSWork);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test to verify that SPS work will be dropped once the datanode is marked
+   * as expired. Internally the 'dropSPSWork' flag is set to true on
+   * expiration, and DNA_DROP_SPS_WORK_COMMAND is sent to that datanode when
+   * it reconnects.
+   */
+  @Test(timeout = 90000)
+  public void testDeadDatanode() throws Exception {
+    int heartbeatExpireInterval = 2 * 2000;
+    config.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        3000);
+    config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000L);
+    createCluster();
+
+    DataNode dn = cluster.getDataNodes().get(0);
+    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+
+    FSNamesystem fsn = cluster.getNamesystem(0);
+    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn,
+        dn.getDatanodeId());
+    boolean isDead = false;
+    int retries = 20;
+    while (retries > 0) {
+      isDead = dnd.getLastUpdateMonotonic() < (monotonicNow()
+          - heartbeatExpireInterval);
+      if (isDead) {
+        break;
+      }
+      retries--;
+      Thread.sleep(250);
+    }
+    Assert.assertTrue("Datanode is alive", isDead);
+    // Re-enable datanode heartbeats so that the expired datanode reconnects
+    // to the namenode and can receive the drop command.
+    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
+
+    // Verify that datanode expiration will set drop SPS flag to
+    // true. This will ensure that DNA_DROP_SPS_WORK_COMMAND will be
+    // propagated to datanode during reconnection.
+    boolean dropSPSWork = false;
+    retries = 50;
+    while (retries > 0) {
+      dropSPSWork = dnd.shouldDropSPSWork();
+      if (dropSPSWork) {
+        break;
+      }
+      retries--;
+      Thread.sleep(100);
+    }
+    Assert.assertTrue("Didn't drop SPS work", dropSPSWork);
+  }
 }
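
The new tests above wait for the drop-SPS flag with hand-rolled retry/sleep
loops. The same wait can also be expressed with GenericTestUtils.waitFor, which
other tests in this patch series already use. The snippet below is only a
hedged sketch of that alternative; it assumes the listOfDns list and the
DatanodeDescriptor#shouldDropSPSWork() accessor shown above, plus an import of
com.google.common.base.Supplier.

      // Wait until every tracked datanode has the drop-SPS flag set, polling
      // every 250 ms and failing the test after 5 seconds.
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          for (DatanodeDescriptor dnd : listOfDns) {
            if (!dnd.shouldDropSPSWork()) {
              return false;
            }
          }
          return true;
        }
      }, 250, 5000);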




[20/50] [abbrv] hadoop git commit: HDFS-11151. [SPS]: StoragePolicySatisfier should gracefully handle when there is no target node with the required storage type. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-11151. [SPS]: StoragePolicySatisfier should gracefully handle when there is no target node with the required storage type. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68ede8e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68ede8e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68ede8e8

Branch: refs/heads/HDFS-10285
Commit: 68ede8e8cc5a3d00e399d5fe1a65b331315480cb
Parents: 19ce552
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Sun Nov 27 11:15:26 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:45 2017 +0530

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   2 +-
 .../datanode/BlockStorageMovementTracker.java   |  30 ++++--
 .../datanode/StoragePolicySatisfyWorker.java    |  20 +++-
 .../BlockStorageMovementAttemptedItems.java     |   4 +
 .../server/namenode/StoragePolicySatisfier.java |  53 ++++++---
 .../namenode/TestStoragePolicySatisfier.java    | 108 ++++++++++++++++++-
 6 files changed, 186 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fcc5841..763ec19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4851,7 +4851,7 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void satisfyStoragePolicy(long id) {
     storageMovementNeeded.add(id);
-    if(LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Added block collection id {} to block "
           + "storageMovementNeeded queue", id);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index d31f075..2de88fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -27,8 +27,9 @@ import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
 import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementStatus;
+import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -108,15 +109,32 @@ public class BlockStorageMovementTracker implements Runnable {
     }
   }
 
+  /**
+   * Mark as block movement failure for the given trackId and blockId.
+   *
+   * @param trackId tracking id
+   * @param blockId block id
+   */
+  void markBlockMovementFailure(long trackId, long blockId) {
+    LOG.debug("Mark as block movement failure for the given "
+        + "trackId:{} and blockId:{}", trackId, blockId);
+    BlockMovementResult result = new BlockMovementResult(trackId, blockId, null,
+        BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE);
+    addMovementResultToTrackIdList(result);
+  }
+
   private List<BlockMovementResult> addMovementResultToTrackIdList(
       BlockMovementResult result) {
     long trackId = result.getTrackId();
-    List<BlockMovementResult> perTrackIdList = movementResults.get(trackId);
-    if (perTrackIdList == null) {
-      perTrackIdList = new ArrayList<>();
-      movementResults.put(trackId, perTrackIdList);
+    List<BlockMovementResult> perTrackIdList;
+    synchronized (movementResults) {
+      perTrackIdList = movementResults.get(trackId);
+      if (perTrackIdList == null) {
+        perTrackIdList = new ArrayList<>();
+        movementResults.put(trackId, perTrackIdList);
+      }
+      perTrackIdList.add(result);
     }
-    perTrackIdList.add(result);
     return perTrackIdList;
   }
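
The synchronized block above is needed because markBlockMovementFailure can now
be called from the worker thread that dispatches the moving tasks while the
tracker thread is appending completed results to the same map. As a design
note, the same guarantee could also be sketched with a ConcurrentHashMap; the
declarations below are a simplified illustration (field type, method shape and
the extra java.util imports are assumptions, not the actual Hadoop code):

  private final ConcurrentHashMap<Long, List<BlockMovementResult>>
      movementResults = new ConcurrentHashMap<>();

  private List<BlockMovementResult> addMovementResultToTrackIdList(
      BlockMovementResult result) {
    // computeIfAbsent creates the per-trackId list atomically; the list itself
    // is synchronized so concurrent add() calls remain safe.
    List<BlockMovementResult> perTrackIdList = movementResults.computeIfAbsent(
        result.getTrackId(),
        id -> Collections.synchronizedList(new ArrayList<BlockMovementResult>()));
    perTrackIdList.add(result);
    return perTrackIdList;
  }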
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 1bd851e..a69a38b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -151,14 +151,24 @@ public class StoragePolicySatisfyWorker {
    */
   public void processBlockMovingTasks(long trackID, String blockPoolID,
       Collection<BlockMovingInfo> blockMovingInfos) {
+    LOG.debug("Received BlockMovingTasks {}", blockMovingInfos);
     for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
-      assert blkMovingInfo
-          .getSources().length == blkMovingInfo.getTargets().length;
-
-      for (int i = 0; i < blkMovingInfo.getSources().length; i++) {
+      // Iterating backwards. This is to ensure that all the block src locations
+      // which don't have a target node are marked as failures before
+      // scheduling the block movement to valid target nodes.
+      for (int i = blkMovingInfo.getSources().length - 1; i >= 0; i--) {
+        if (i >= blkMovingInfo.getTargets().length) {
+          // Since there is no target selected for scheduling the block,
+          // just mark this block storage movement as a failure. Later, the
+          // namenode can take action on this.
+          movementTracker.markBlockMovementFailure(trackID,
+              blkMovingInfo.getBlock().getBlockId());
+          continue;
+        }
+        DatanodeInfo target = blkMovingInfo.getTargets()[i];
         BlockMovingTask blockMovingTask = new BlockMovingTask(
             trackID, blockPoolID, blkMovingInfo.getBlock(),
-            blkMovingInfo.getSources()[i], blkMovingInfo.getTargets()[i],
+            blkMovingInfo.getSources()[i], target,
             blkMovingInfo.getSourceStorageTypes()[i],
             blkMovingInfo.getTargetStorageTypes()[i]);
         Future<BlockMovementResult> moveCallable = moverCompletionService

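The backwards iteration above matters when a block has more source locations
than chosen targets: the trailing sources (indices beyond targets.length) are
reported as failures first, and only the paired prefix is scheduled for
movement. A small self-contained illustration of that pairing rule follows;
the arrays, class name and printouts are hypothetical and only demonstrate the
index handling, not the Hadoop types:

public class PairingSketch {
  public static void main(String[] args) {
    // Hypothetical case: three source replicas, but only one target chosen.
    String[] sources = {"dnA", "dnB", "dnC"};
    String[] targets = {"dnD"};
    for (int i = sources.length - 1; i >= 0; i--) {
      if (i >= targets.length) {
        // No target paired with this source: report it as a failed movement
        // so the namenode can retry the block later.
        System.out.println("mark failure for source " + sources[i]);
        continue;
      }
      System.out.println("schedule move " + sources[i] + " -> " + targets[i]);
    }
    // Output: failures for dnC and dnB, then one scheduled move dnA -> dnD.
  }
}
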
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 580d0d6..5457dc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -218,4 +218,8 @@ public class BlockStorageMovementAttemptedItems {
     return storageMovementAttemptedResults.size();
   }
 
+  @VisibleForTesting
+  public int getAttemptedItemsCount() {
+    return storageMovementAttemptedItems.size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 4967a89..617ab2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -211,6 +211,14 @@ public class StoragePolicySatisfier implements Runnable {
       }
     }
 
+    addBlockMovingInfosToCoordinatorDn(blockCollectionID, blockMovingInfos,
+        coordinatorNode);
+  }
+
+  private void addBlockMovingInfosToCoordinatorDn(long blockCollectionID,
+      List<BlockMovingInfo> blockMovingInfos,
+      DatanodeDescriptor coordinatorNode) {
+
     if (blockMovingInfos.size() < 1) {
       // TODO: Major: handle this case. I think we need retry cases to
       // be implemented. Idea is, if some files are not getting storage movement
@@ -218,6 +226,20 @@ public class StoragePolicySatisfier implements Runnable {
       return;
     }
 
+    boolean needBlockStorageMovement = false;
+    for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
+      // Check that at least one block storage movement has been chosen
+      if (blkMovingInfo.getTargets().length > 0) {
+        needBlockStorageMovement = true;
+        break;
+      }
+    }
+    if (!needBlockStorageMovement) {
+      // Simply return as there are no targets selected for scheduling the block
+      // movement.
+      return;
+    }
+
     // 'BlockCollectionId' is used as the tracking ID. All the blocks under this
     // blockCollectionID will be added to this datanode.
     coordinatorNode.addBlocksToMoveStorage(blockCollectionID, blockMovingInfos);
@@ -251,9 +273,8 @@ public class StoragePolicySatisfier implements Runnable {
     List<DatanodeDescriptor> chosenNodes = new ArrayList<>();
     for (int i = 0; i < sourceWithStorageList.size(); i++) {
       StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-      StorageTypeNodePair chosenTarget =
-          chooseTargetTypeInSameNode(existingTypeNodePair.dn, expected,
-              locsForExpectedStorageTypes, chosenNodes);
+      StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+          existingTypeNodePair.dn, expected);
 
       if (chosenTarget == null && blockManager.getDatanodeManager()
           .getNetworkTopology().isNodeGroupAware()) {
@@ -282,15 +303,14 @@ public class StoragePolicySatisfier implements Runnable {
         chosenNodes.add(chosenTarget.dn);
         // TODO: We can increment scheduled block count for this node?
       } else {
-        // TODO: Failed to ChooseTargetNodes...So let just retry. Shall we
-        // proceed without this targets? Then what should be final result?
-        // How about pack empty target, means target node could not be chosen ,
-        // so result should be RETRY_REQUIRED from DN always.
-        // Log..unable to choose target node for source datanodeDescriptor
+        LOG.warn(
+            "Failed to choose target datanode for the required"
+                + " storage types {}, block:{}, existing storage type:{}",
+            expected, blockInfo, existingTypeNodePair.storageType);
         sourceNodes.add(existingTypeNodePair.dn);
         sourceStorageTypes.add(existingTypeNodePair.storageType);
-        targetNodes.add(null);
-        targetStorageTypes.add(null);
+        // Important: not setting the target details (empty targets). Later,
+        // this is used as an indicator for retrying this block movement.
       }
     }
     BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blockInfo,
@@ -302,15 +322,13 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   /**
-   * Choose the target storage within same Datanode if possible.
+   * Choose the target storage within same datanode if possible.
    *
-   * @param locsForExpectedStorageTypes
-   * @param chosenNodes
+   * @param source source datanode
+   * @param targetTypes list of target storage types
    */
   private StorageTypeNodePair chooseTargetTypeInSameNode(
-      DatanodeDescriptor source, List<StorageType> targetTypes,
-      StorageTypeNodeMap locsForExpectedStorageTypes,
-      List<DatanodeDescriptor> chosenNodes) {
+      DatanodeDescriptor source, List<StorageType> targetTypes) {
     for (StorageType t : targetTypes) {
       DatanodeStorageInfo chooseStorage4Block =
           source.chooseStorage4Block(t, 0);
@@ -328,6 +346,9 @@ public class StoragePolicySatisfier implements Runnable {
     for (StorageType t : targetTypes) {
       List<DatanodeDescriptor> nodesWithStorages =
           locsForExpectedStorageTypes.getNodesWithStorages(t);
+      if (nodesWithStorages == null || nodesWithStorages.isEmpty()) {
+        continue; // no target nodes with the required storage type.
+      }
       Collections.shuffle(nodesWithStorages);
       for (DatanodeDescriptor target : nodesWithStorages) {
         if (!chosenNodes.contains(target) && matcher.match(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ede8e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 901e1ba..499fe3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -44,8 +46,6 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-
 /**
  * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
  * moved and finding its suggested target locations to move.
@@ -79,7 +79,7 @@ public class TestStoragePolicySatisfier {
       throws Exception {
 
     try {
-      // Change policy to ALL_SSD
+      // Change policy to COLD
       dfs.setStoragePolicy(new Path(file), "COLD");
       FSNamesystem namesystem = hdfsCluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode(file);
@@ -356,6 +356,108 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests to verify that for the given path, some of the blocks or block src
+   * locations(src nodes) under the given path will be scheduled for block
+   * movement.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
+   * Only one datanode is available with storage type ARCHIVE, say D.
+   *
+   * SPS will schedule block movement to the coordinator node with the details,
+   * blk_1[move A(DISK) -> D(ARCHIVE)], blk_2[move A(DISK) -> D(ARCHIVE)].
+   */
+  @Test(timeout = 300000)
+  public void testWhenOnlyFewTargetDatanodeAreAvailableToSatisfyStoragePolicy()
+      throws Exception {
+    try {
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE}};
+
+      // Adding ARCHIVE based datanodes.
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      // Wait till StoragePolicySatisfier identifies the block to move to the
+      // ARCHIVE area.
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+
+      waitForBlocksMovementResult(1, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify that for the given path, no blocks or block src
+   * locations(src nodes) under the given path will be scheduled for block
+   * movement as there are no available datanode with required storage type.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
+   * No datanode is available with storage type ARCHIVE.
+   *
+   * SPS won't schedule any block movement for this path.
+   */
+  @Test(timeout = 300000)
+  public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
+      throws Exception {
+    try {
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.DISK}};
+      // Adding DISK based datanodes
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+
+      // No block movement will be scheduled as there is no target node available
+      // with the required storage type.
+      waitForAttemptedItems(1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 3, 30000);
+      // Since there is no target node, the item will time out and then be
+      // re-attempted.
+      waitForAttemptedItems(1, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
+      int timeout) throws TimeoutException, InterruptedException {
+    BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
+    final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
+            expectedBlkMovAttemptedCount,
+            sps.getAttemptedItemsMonitor().getAttemptedItemsCount());
+        return sps.getAttemptedItemsMonitor()
+            .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
+      }
+    }, 100, timeout);
+  }
+
   private void waitForBlocksMovementResult(long expectedBlkMovResultsCount,
       int timeout) throws TimeoutException, InterruptedException {
     BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();




[19/50] [abbrv] hadoop git commit: HDFS-10802. [SPS]: Add satisfyStoragePolicy API in HdfsAdmin. Contributed by Yuanbo Liu

Posted by ra...@apache.org.
HDFS-10802. [SPS]: Add satisfyStoragePolicy API in HdfsAdmin. Contributed by Yuanbo Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19ce552e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19ce552e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19ce552e

Branch: refs/heads/HDFS-10285
Commit: 19ce552e919635afeedb86b517559b61dcd3dd34
Parents: b7a29eb
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Thu Nov 17 14:07:45 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:40 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  19 ++-
 .../hadoop/hdfs/DistributedFileSystem.java      |  34 +++++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  10 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  14 +++
 .../ClientNamenodeProtocolTranslatorPB.java     |  11 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  10 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 +++
 .../server/blockmanagement/BlockManager.java    |   4 +
 .../hdfs/server/namenode/FSDirAttrOp.java       |  56 +++++++++
 .../hdfs/server/namenode/FSNamesystem.java      |  18 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   6 +
 .../namenode/TestStoragePolicySatisfier.java    | 124 +++++++++++++++++++
 12 files changed, 323 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9239df3..e6f731d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3068,7 +3068,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * has REPLICATION policy.
    * @throws IOException
    */
-
   public ErasureCodingPolicy getErasureCodingPolicy(String src)
       throws IOException {
     checkOpen();
@@ -3081,6 +3080,24 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  /**
+   * Satisfy storage policy for an existing file/directory.
+   * @param src file/directory name
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(String src) throws IOException {
+    checkOpen();
+    try (TraceScope ignored =
+        newPathTraceScope("satisfyStoragePolicy", src)) {
+      namenode.satisfyStoragePolicy(src);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          SafeModeException.class,
+          UnresolvedPathException.class);
+    }
+  }
+
   Tracer getTracer() {
     return tracer;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f3605fa..480ef73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2570,6 +2570,40 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
+   * Set the source path to satisfy storage policy. This API is non-recursive
+   * in nature, i.e., if the source path is a directory then all the files
+   * immediately under the directory would be considered for satisfying the
+   * policy, and any sub-directories under this path will be skipped.
+   *
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+    Path absF = fixRelativePart(path);
+    new FileSystemLinkResolver<Void>() {
+
+      @Override
+      public Void doCall(Path p) throws IOException {
+        dfs.satisfyStoragePolicy(getPathName(p));
+        return null;
+      }
+
+      @Override
+      public Void next(FileSystem fs, Path p) throws IOException {
+        // DFS only
+        if (fs instanceof  DistributedFileSystem) {
+          DistributedFileSystem myDfs = (DistributedFileSystem) fs;
+          myDfs.satisfyStoragePolicy(p);
+          return null;
+        }
+        throw new UnsupportedOperationException(
+            "Cannot satisfyStoragePolicy through a symlink to a "
+                + "non-DistributedFileSystem: " + path + " -> " + p);
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
    * Get erasure coding policy information for the specified path
    *
    * @param path The path of the file or directory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 85a7efe..b580e0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -533,6 +533,16 @@ public class HdfsAdmin {
   }
 
   /**
+   * Satisfy the storage policy for the given source path.
+   *
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+    dfs.satisfyStoragePolicy(path);
+  }
+
+  /**
    * Get the Erasure coding policies supported.
    *
    * @throws IOException

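Taken together with the DistributedFileSystem change above, a client first sets
the desired policy on a path and then asks the namenode to satisfy it. The
sketch below shows one plausible way to drive the new HdfsAdmin API; the
cluster URI and the /cold path are assumptions for illustration, and the policy
is expected to have been set beforehand (for example with
"hdfs storagepolicies -setStoragePolicy -path /cold -policy COLD").

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed namenode address; adjust for the target cluster.
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);

    // Schedules block moves for files directly under /cold. The call is
    // non-recursive, so files in sub-directories of /cold are skipped.
    admin.satisfyStoragePolicy(new Path("/cold"));
  }
}
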
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index b550467..4232fef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1670,4 +1670,18 @@ public interface ClientProtocol {
    */
   @Idempotent
   BatchedEntries<OpenFileEntry> listOpenFiles(long prevId) throws IOException;
+
+  /**
+   * Satisfy the storage policy for a file/directory.
+   * @param path Path of an existing file/directory.
+   * @throws AccessControlException If access is denied.
+   * @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
+   *           contains a symlink.
+   * @throws java.io.FileNotFoundException If file/dir <code>src</code> is not
+   *           found.
+   * @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException if the
+   *           operation is not allowed in safemode.
+   */
+  @Idempotent
+  void satisfyStoragePolicy(String path) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index ec7d93f..7b5ca4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -177,6 +177,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Trunca
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
@@ -1862,4 +1863,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
+  @Override
+  public void satisfyStoragePolicy(String src) throws IOException {
+    SatisfyStoragePolicyRequestProto req =
+        SatisfyStoragePolicyRequestProto.newBuilder().setSrc(src).build();
+    try {
+      rpcProxy.satisfyStoragePolicy(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 3f108fa..c601283 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -793,6 +793,14 @@ message ListOpenFilesResponseProto {
   required bool hasMore = 2;
 }
 
+message SatisfyStoragePolicyRequestProto {
+  required string src = 1;
+}
+
+message SatisfyStoragePolicyResponseProto {
+
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -973,4 +981,6 @@ service ClientNamenodeProtocol {
       returns(GetQuotaUsageResponseProto);
   rpc listOpenFiles(ListOpenFilesRequestProto)
       returns(ListOpenFilesResponseProto);
+  rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto)
+      returns(SatisfyStoragePolicyResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 44d5216..8d6d570 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -216,6 +216,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SatisfyStoragePolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
@@ -394,6 +396,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final CheckAccessResponseProto
     VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
 
+  private static final SatisfyStoragePolicyResponseProto
+      VOID_SATISFYSTORAGEPOLICY_RESPONSE = SatisfyStoragePolicyResponseProto
+      .getDefaultInstance();
+
   /**
    * Constructor
    * 
@@ -1816,4 +1822,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public SatisfyStoragePolicyResponseProto satisfyStoragePolicy(
+      RpcController controller,
+      SatisfyStoragePolicyRequestProto request) throws ServiceException {
+    try {
+      server.satisfyStoragePolicy(request.getSrc());
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VOID_SATISFYSTORAGEPOLICY_RESPONSE;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8cc09a9..fcc5841 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4851,6 +4851,10 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void satisfyStoragePolicy(long id) {
     storageMovementNeeded.add(id);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Added block collection id {} to block "
+          + "storageMovementNeeded queue", id);
+    }
   }
 
   public StoragePolicySatisfier getStoragePolicySatisfier() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index d4b24f5..a80f751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -197,6 +197,33 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
+  static void satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
+      String src) throws IOException {
+
+    // make sure storage policy is enabled, otherwise
+    // there is no need to satisfy storage policy.
+    if (!fsd.isStoragePolicyEnabled()) {
+      throw new IOException(String.format(
+          "Failed to satisfy storage policy since %s is set to false.",
+          DFS_STORAGE_POLICY_ENABLED_KEY));
+    }
+
+    FSPermissionChecker pc = fsd.getPermissionChecker();
+    INodesInPath iip;
+    fsd.writeLock();
+    try {
+
+      // check operation permission.
+      iip = fsd.resolvePath(pc, src, DirOp.WRITE);
+      if (fsd.isPermissionEnabled()) {
+        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+      }
+      unprotectedSatisfyStoragePolicy(bm, iip);
+    } finally {
+      fsd.writeUnlock();
+    }
+  }
+
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
       throws IOException {
     return bm.getStoragePolicies();
@@ -458,6 +485,35 @@ public class FSDirAttrOp {
     }
   }
 
+  static void unprotectedSatisfyStoragePolicy(BlockManager bm,
+      INodesInPath iip) throws IOException {
+
+    // check whether file exists.
+    INode inode = iip.getLastINode();
+    if (inode == null) {
+      throw new FileNotFoundException("File/Directory does not exist: "
+          + iip.getPath());
+    }
+
+    // TODO: need to check whether inode's storage policy
+    // has been satisfied or inode exists in the satisfier
+    // list before calling satisfyStoragePolicy in BlockManager.
+    if (inode.isDirectory()) {
+      final int snapshotId = iip.getLatestSnapshotId();
+      for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
+        if (node.isFile()) {
+          bm.satisfyStoragePolicy(node.getId());
+
+        }
+      }
+    } else if (inode.isFile()) {
+      bm.satisfyStoragePolicy(inode.getId());
+    } else {
+      throw new FileNotFoundException("File/Directory does not exist: "
+          + iip.getPath());
+    }
+  }
+
   private static void setDirStoragePolicy(
       FSDirectory fsd, INodesInPath iip, byte policyId) throws IOException {
     INode inode = FSDirectory.resolveLastINode(iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d08a3fb..eefea3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2139,6 +2139,24 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
+   * Satisfy the storage policy for a file or a directory.
+   *
+   * @param src file/directory path
+   */
+  void satisfyStoragePolicy(String src) throws IOException {
+    checkOperation(OperationCategory.WRITE);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot satisfy storage policy for " + src);
+      // TODO: need to update editlog for persistence.
+      FSDirAttrOp.satisfyStoragePolicy(dir, blockManager, src);
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  /**
    * unset storage policy set for a given file or a directory.
    *
    * @param src file/directory path

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 20a5783..b9b8b69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1367,6 +1367,12 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
+  public void satisfyStoragePolicy(String src) throws IOException {
+    checkNNStartup();
+    namesystem.satisfyStoragePolicy(src);
+  }
+
+  @Override // ClientProtocol
   public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
                        StorageType type)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ce552e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index e84052f..901e1ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -24,14 +25,18 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -39,6 +44,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+
 /**
  * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
  * moved and finding its suggested target locations to move.
@@ -232,6 +239,123 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests to verify that hdfsAdmin.satisfyStoragePolicy works well for a file.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyFileWithHdfsAdmin() throws Exception {
+    HdfsAdmin hdfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+    try {
+
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file), "COLD");
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+
+      hdfsAdmin.satisfyStoragePolicy(new Path(file));
+
+      hdfsCluster.triggerHeartbeats();
+      // Wait till namenode notified about the block location details
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify that hdfsAdmin.satisfyStoragePolicy works well for a directory.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyDirWithHdfsAdmin() throws Exception {
+    HdfsAdmin hdfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+
+    try {
+
+      final String subDir = "/subDir";
+      final String subFile1 = subDir + "/subFile1";
+      final String subDir2 = subDir + "/subDir2";
+      final String subFile2 = subDir2 + "/subFile2";
+      dfs.mkdirs(new Path(subDir));
+      writeContent(subFile1);
+      dfs.mkdirs(new Path(subDir2));
+      writeContent(subFile2);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(subDir), "ONE_SSD");
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+
+      hdfsAdmin.satisfyStoragePolicy(new Path(subDir));
+
+      hdfsCluster.triggerHeartbeats();
+
+      // The policy should take effect for the file directly under the directory.
+      waitExpectedStorageType(subFile1, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(subFile1, StorageType.DISK, 2, 30000);
+
+      // The policy should not take effect for the file under the sub-directory.
+      waitExpectedStorageType(subFile2, StorageType.DEFAULT, 3, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify hdfsAdmin.satisfyStoragePolicy exceptions.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyWithExceptions() throws Exception {
+    try {
+      final String nonExistingFile = "/noneExistingFile";
+      hdfsCluster.getConfiguration(0).
+          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
+      hdfsCluster.restartNameNodes();
+      hdfsCluster.waitActive();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(file));
+        Assert.fail(String.format(
+            "Should fail to satisfy storage policy "
+                + "for %s since %s is set to false.",
+            file, DFS_STORAGE_POLICY_ENABLED_KEY));
+      } catch (IOException e) {
+        Assert.assertTrue(e.getMessage().contains(String.format(
+            "Failed to satisfy storage policy since %s is set to false.",
+            DFS_STORAGE_POLICY_ENABLED_KEY)));
+      }
+
+      hdfsCluster.getConfiguration(0).
+          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
+      hdfsCluster.restartNameNodes();
+      hdfsCluster.waitActive();
+      hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(nonExistingFile));
+        Assert.fail("Should throw FileNotFoundException for " +
+            nonExistingFile);
+      } catch (FileNotFoundException e) {
+        // expected: the path does not exist
+      }
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
   private void waitForBlocksMovementResult(long expectedBlkMovResultsCount,
       int timeout) throws TimeoutException, InterruptedException {
     BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();


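The tests above exercise the new HdfsAdmin.satisfyStoragePolicy(Path) call end to end against a MiniDFSCluster. For reference, a minimal client-side sketch of the same flow is shown here; it assumes an already configured HDFS client, and the path name below is purely illustrative, not part of this commit:

    // Sketch: ask SPS to reconcile a path's replicas with its storage policy.
    Configuration conf = new HdfsConfiguration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

    Path data = new Path("/data/reports");   // hypothetical path
    dfs.setStoragePolicy(data, "COLD");      // declare the desired policy
    hdfsAdmin.satisfyStoragePolicy(data);    // schedule block movement via SPS

The call only queues the work on the NameNode; the actual replica movement happens asynchronously, which is why the tests poll with waitExpectedStorageType.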
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/50] [abbrv] hadoop git commit: HDFS-11193 : [SPS]: Erasure coded files should be considered for satisfying storage policy. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-11193 : [SPS]: Erasure coded files should be considered for satisfying storage policy. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b1c2624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b1c2624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b1c2624

Branch: refs/heads/HDFS-10285
Commit: 3b1c262458366a8e1e3c39c722ae7cf9115337d4
Parents: a09c7d4
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Thu Jan 5 09:30:39 2017 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:41 2017 +0530

----------------------------------------------------------------------
 .../blockmanagement/BlockInfoStriped.java       |  10 +
 .../server/namenode/StoragePolicySatisfier.java |  76 ++-
 ...stStoragePolicySatisfierWithStripedFile.java | 469 +++++++++++++++++++
 3 files changed, 551 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
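Background for the striped-block handling introduced below: for an erasure-coded block group, SPS has to schedule movement per internal block rather than per group. A minimal sketch of how one internal block is derived from a block group, assuming the group-level values (groupId, groupBytes, cellSize, dataBlocks, genStamp, blockIndex) are already known; the variable names are illustrative only:

    // Sketch: derive the internal block at a given index of a striped block group.
    long internalBlockId = groupId + blockIndex;        // internal ids are offsets within the group
    long internalNumBytes = StripedBlockUtil.getInternalBlockLength(
        groupBytes, cellSize, dataBlocks, blockIndex);  // length of this internal block
    Block internalBlock = new Block(internalBlockId, internalNumBytes, genStamp);

This is the same computation performed in buildStripedBlockMovingInfos in the diff that follows.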


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1c2624/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 790cd77..8bc63c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,6 +244,10 @@ public class BlockInfoStriped extends BlockInfo {
     return true;
   }
 
+  /**
+   * This class contains datanode storage information and block index in the
+   * block group.
+   */
   public static class StorageAndBlockIndex {
     private final DatanodeStorageInfo storage;
     private final byte blockIndex;
@@ -253,10 +257,16 @@ public class BlockInfoStriped extends BlockInfo {
       this.blockIndex = blockIndex;
     }
 
+    /**
+     * @return storage in the datanode.
+     */
     public DatanodeStorageInfo getStorage() {
       return storage;
     }
 
+    /**
+     * @return block index in the block group.
+     */
     public byte getBlockIndex() {
       return blockIndex;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1c2624/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 26e0775..a854bd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.balancer.Matcher;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -226,8 +229,26 @@ public class StoragePolicySatisfier implements Runnable {
 
     for (int i = 0; i < blocks.length; i++) {
       BlockInfo blockInfo = blocks[i];
-      List<StorageType> expectedStorageTypes = existingStoragePolicy
+      List<StorageType> expectedStorageTypes;
+      if (blockInfo.isStriped()) {
+        if (ErasureCodingPolicyManager
+            .checkStoragePolicySuitableForECStripedMode(
+                existingStoragePolicyID)) {
+          expectedStorageTypes = existingStoragePolicy
+              .chooseStorageTypes((short) blockInfo.getCapacity());
+        } else {
+          // Currently only a limited set of policies (HOT, COLD, ALLSSD) is
+          // supported for EC striped mode files. SPS will skip moving the
+          // blocks if the storage policy is not one of the supported policies.
+          LOG.warn("The storage policy " + existingStoragePolicy.getName()
+              + " is not suitable for Striped EC files. "
+              + "So, ignoring to move the blocks");
+          return false;
+        }
+      } else {
+        expectedStorageTypes = existingStoragePolicy
             .chooseStorageTypes(blockInfo.getReplication());
+      }
       foundMatchingTargetNodesForAllBlocks |= computeBlockMovingInfos(
           blockMovingInfos, blockInfo, expectedStorageTypes);
     }
@@ -439,12 +460,18 @@ public class StoragePolicySatisfier implements Runnable {
     if (sourceNodes.size() <= 0) {
       return blkMovingInfos;
     }
-    buildBlockMovingInfos(blockInfo, sourceNodes, sourceStorageTypes,
-        targetNodes, targetStorageTypes, blkMovingInfos);
+
+    if (blockInfo.isStriped()) {
+      buildStripedBlockMovingInfos(blockInfo, sourceNodes, sourceStorageTypes,
+          targetNodes, targetStorageTypes, blkMovingInfos);
+    } else {
+      buildContinuousBlockMovingInfos(blockInfo, sourceNodes,
+          sourceStorageTypes, targetNodes, targetStorageTypes, blkMovingInfos);
+    }
     return blkMovingInfos;
   }
 
-  private void buildBlockMovingInfos(BlockInfo blockInfo,
+  private void buildContinuousBlockMovingInfos(BlockInfo blockInfo,
       List<DatanodeInfo> sourceNodes, List<StorageType> sourceStorageTypes,
       List<DatanodeInfo> targetNodes, List<StorageType> targetStorageTypes,
       List<BlockMovingInfo> blkMovingInfos) {
@@ -458,6 +485,47 @@ public class StoragePolicySatisfier implements Runnable {
     blkMovingInfos.add(blkMovingInfo);
   }
 
+  private void buildStripedBlockMovingInfos(BlockInfo blockInfo,
+      List<DatanodeInfo> sourceNodes, List<StorageType> sourceStorageTypes,
+      List<DatanodeInfo> targetNodes, List<StorageType> targetStorageTypes,
+      List<BlockMovingInfo> blkMovingInfos) {
+    // For a striped block, an internal block has to be constructed at each
+    // block index of the block group. Here we iterate over all the block
+    // indices and construct the internal blocks, which can then be considered
+    // for block movement.
+    BlockInfoStriped sBlockInfo = (BlockInfoStriped) blockInfo;
+    for (StorageAndBlockIndex si : sBlockInfo.getStorageAndIndexInfos()) {
+      if (si.getBlockIndex() >= 0) {
+        DatanodeDescriptor dn = si.getStorage().getDatanodeDescriptor();
+        DatanodeInfo[] srcNode = new DatanodeInfo[1];
+        StorageType[] srcStorageType = new StorageType[1];
+        DatanodeInfo[] targetNode = new DatanodeInfo[1];
+        StorageType[] targetStorageType = new StorageType[1];
+        for (int i = 0; i < sourceNodes.size(); i++) {
+          DatanodeInfo node = sourceNodes.get(i);
+          if (node.equals(dn)) {
+            srcNode[0] = node;
+            srcStorageType[0] = sourceStorageTypes.get(i);
+            targetNode[0] = targetNodes.get(i);
+            targetStorageType[0] = targetStorageTypes.get(i);
+
+            // construct internal block
+            long blockId = blockInfo.getBlockId() + si.getBlockIndex();
+            long numBytes = StripedBlockUtil.getInternalBlockLength(
+                sBlockInfo.getNumBytes(), sBlockInfo.getCellSize(),
+                sBlockInfo.getDataBlockNum(), si.getBlockIndex());
+            Block blk = new Block(blockId, numBytes,
+                blockInfo.getGenerationStamp());
+            BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blk, srcNode,
+                targetNode, srcStorageType, targetStorageType);
+            blkMovingInfos.add(blkMovingInfo);
+            break; // found matching source-target nodes
+          }
+        }
+      }
+    }
+  }
+
   /**
    * Choose the target storage within same datanode if possible.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1c2624/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 0000000..5f8639f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,469 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to check the striped
+ * blocks to be moved and find their expected target locations in order to
+ * satisfy the storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+    ecPolicy = getEcPolicy();
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks (data + parity blocks) are
+   * moved to satisfy the storage policy.
+   */
+  @Test(timeout = 300000)
+  public void testMoverWithFullStripe() throws Exception {
+    // start 10 datanodes
+    int numOfDatanodes = 10;
+    int storagesPerDatanode = 2;
+    long capacity = 20 * defaultStripeBlockSize;
+    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+    for (int i = 0; i < numOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    final Configuration conf = new HdfsConfiguration();
+    initConfWithStripe(conf, defaultStripeBlockSize);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numOfDatanodes)
+        .storagesPerDatanode(storagesPerDatanode)
+        .storageTypes(new StorageType[][]{
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE}})
+        .storageCapacities(capacities)
+        .build();
+
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    try {
+      cluster.waitActive();
+
+      // set "/bar" directory with HOT storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(conf,
+          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+      String barDir = "/bar";
+      client.mkdirs(barDir, new FsPermission((short) 777), true);
+      client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+      // set an EC policy on "/bar" directory
+      client.setErasureCodingPolicy(barDir, null);
+
+      // write file to barDir
+      final String fooFile = "/bar/foo";
+      long fileLen = cellSize * dataBlocks;
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile),
+          fileLen, (short) 3, 0);
+
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0,
+          fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+      StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
+          dataBlocks + parityBlocks);
+
+      // start 5 more datanodes
+      int numOfNewDatanodes = 5;
+      capacities = new long[numOfNewDatanodes][storagesPerDatanode];
+      for (int i = 0; i < numOfNewDatanodes; i++) {
+        for (int j = 0; j < storagesPerDatanode; j++) {
+          capacities[i][j] = capacity;
+        }
+      }
+      cluster.startDataNodes(conf, 5,
+          new StorageType[][]{
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}},
+          true, null, null, null, capacities, null, false, false, false, null);
+      cluster.triggerHeartbeats();
+
+      // move file to ARCHIVE
+      client.setStoragePolicy(barDir, "COLD");
+      hdfsAdmin.satisfyStoragePolicy(new Path(fooFile));
+      LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
+      cluster.triggerHeartbeats();
+
+      waitForBlocksMovementResult(cluster, 1, 60000);
+      // verify storage types and locations
+      waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.ARCHIVE, 9,
+          9, 60000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify that when only a few target datanodes are available, only
+   * some striped blocks can be moved; the rest keep waiting for available nodes.
+   *
+   * For example, we have 3 nodes A(disk, disk), B(disk, disk), C(disk, archive)
+   *
+   * Assume a block with storage locations A(disk), B(disk), C(disk). Now, set
+   * the policy to COLD and invoke {@link HdfsAdmin#satisfyStoragePolicy(Path)};
+   * while choosing the target node for A, it shouldn't choose C. For C, it
+   * should do local block movement as it has the ARCHIVE storage type.
+   */
+  @Test(timeout = 300000)
+  public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
+      throws Exception {
+    // start 10 datanodes
+    int numOfDatanodes = 10;
+    int storagesPerDatanode = 2;
+    long capacity = 20 * defaultStripeBlockSize;
+    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+    for (int i = 0; i < numOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    final Configuration conf = new HdfsConfiguration();
+    initConfWithStripe(conf, defaultStripeBlockSize);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numOfDatanodes)
+        .storagesPerDatanode(storagesPerDatanode)
+        .storageTypes(new StorageType[][]{
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.ARCHIVE}})
+        .storageCapacities(capacities)
+        .build();
+
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    try {
+      cluster.waitActive();
+
+      // set "/bar" directory with HOT storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(conf,
+          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+      String barDir = "/bar";
+      client.mkdirs(barDir, new FsPermission((short) 777), true);
+      client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+      // set an EC policy on "/bar" directory
+      client.setErasureCodingPolicy(barDir, null);
+
+      // write file to barDir
+      final String fooFile = "/bar/foo";
+      long fileLen = cellSize * dataBlocks;
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile),
+          fileLen, (short) 3, 0);
+
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0,
+          fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+      StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
+          dataBlocks + parityBlocks);
+
+      // start 2 more datanodes
+      int numOfNewDatanodes = 2;
+      capacities = new long[numOfNewDatanodes][storagesPerDatanode];
+      for (int i = 0; i < numOfNewDatanodes; i++) {
+        for (int j = 0; j < storagesPerDatanode; j++) {
+          capacities[i][j] = capacity;
+        }
+      }
+      cluster.startDataNodes(conf, 2,
+          new StorageType[][]{
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}},
+          true, null, null, null, capacities, null, false, false, false, null);
+      cluster.triggerHeartbeats();
+
+      // Move file to ARCHIVE. Only 5 datanodes are available with ARCHIVE
+      // storage type.
+      client.setStoragePolicy(barDir, "COLD");
+      hdfsAdmin.satisfyStoragePolicy(new Path(fooFile));
+      LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
+      cluster.triggerHeartbeats();
+
+      waitForBlocksMovementResult(cluster, 1, 60000);
+      waitForAttemptedItems(cluster, 1, 30000);
+      // verify storage types and locations.
+      waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.ARCHIVE, 5,
+          9, 60000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests to verify that for the given path, no blocks under the given path
+   * will be scheduled for block movement, as there is no available datanode
+   * with the required storage type.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
+   * No datanode is available with storage type ARCHIVE.
+   *
+   * SPS won't schedule any block movement for this path.
+   */
+  @Test(timeout = 300000)
+  public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
+      throws Exception {
+    // start 10 datanodes
+    int numOfDatanodes = 10;
+    int storagesPerDatanode = 2;
+    long capacity = 20 * defaultStripeBlockSize;
+    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+    for (int i = 0; i < numOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    final Configuration conf = new HdfsConfiguration();
+    initConfWithStripe(conf, defaultStripeBlockSize);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numOfDatanodes)
+        .storagesPerDatanode(storagesPerDatanode)
+        .storageTypes(new StorageType[][]{
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK}})
+        .storageCapacities(capacities)
+        .build();
+
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    try {
+      cluster.waitActive();
+
+      // set "/bar" directory with HOT storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(conf,
+          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+      String barDir = "/bar";
+      client.mkdirs(barDir, new FsPermission((short) 777), true);
+      client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+      // set an EC policy on "/bar" directory
+      client.setErasureCodingPolicy(barDir, null);
+
+      // write file to barDir
+      final String fooFile = "/bar/foo";
+      long fileLen = cellSize * dataBlocks;
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile),
+          fileLen, (short) 3, 0);
+
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0,
+          fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+      StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
+          dataBlocks + parityBlocks);
+
+      // Try to move the file to ARCHIVE. No datanode has the ARCHIVE storage
+      // type, so no block movement should be scheduled.
+      client.setStoragePolicy(barDir, "COLD");
+      hdfsAdmin.satisfyStoragePolicy(new Path(fooFile));
+      LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
+      cluster.triggerHeartbeats();
+
+      waitForAttemptedItems(cluster, 1, 30000);
+      // verify storage types and locations.
+      waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.DISK, 9, 9,
+          60000);
+      waitForAttemptedItems(cluster, 1, 30000);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private void waitForAttemptedItems(MiniDFSCluster cluster,
+      long expectedBlkMovAttemptedCount, int timeout)
+          throws TimeoutException, InterruptedException {
+    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
+    final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
+            expectedBlkMovAttemptedCount,
+            sps.getAttemptedItemsMonitor().getAttemptedItemsCount());
+        return sps.getAttemptedItemsMonitor()
+            .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
+      }
+    }, 100, timeout);
+  }
+
+  private static void initConfWithStripe(Configuration conf,
+      int stripeBlockSize) {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, stripeBlockSize);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1L);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        false);
+  }
+
+  // Check whether the block movement has successfully completed to satisfy
+  // the storage policy for the given file.
+  private void waitExpectedStorageType(MiniDFSCluster cluster,
+      final String fileName, long fileLen,
+      final StorageType expectedStorageType, int expectedStorageCount,
+      int expectedBlkLocationCount, int timeout) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        int actualStorageCount = 0;
+        try {
+          LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient()
+              .getLocatedBlocks(fileName, 0, fileLen);
+          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+            LOG.info("LocatedBlocks => Size {}, locs {}",
+                lb.getLocations().length, lb);
+            if (lb.getLocations().length > expectedBlkLocationCount) {
+              return false;
+            }
+            for (StorageType storageType : lb.getStorageTypes()) {
+              if (expectedStorageType == storageType) {
+                actualStorageCount++;
+              } else {
+                LOG.info("Expected storage type {} and actual {}",
+                    expectedStorageType, storageType);
+              }
+            }
+          }
+          LOG.info(
+              expectedStorageType + " replica count, expected={} and actual={}",
+              expectedStorageCount, actualStorageCount);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+        return expectedStorageCount == actualStorageCount;
+      }
+    }, 100, timeout);
+  }
+
+  // Check whether the block movement result has arrived at the
+  // Namenode (SPS).
+  private void waitForBlocksMovementResult(MiniDFSCluster cluster,
+      long expectedBlkMovResultsCount, int timeout)
+          throws TimeoutException, InterruptedException {
+    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
+    final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
+    Assert.assertNotNull("Failed to get SPS object reference!", sps);
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LOG.info("expectedResultsCount={} actualResultsCount={}",
+            expectedBlkMovResultsCount,
+            sps.getAttemptedItemsMonitor().resultsCount());
+        return sps.getAttemptedItemsMonitor()
+            .resultsCount() == expectedBlkMovResultsCount;
+      }
+    }, 100, timeout);
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: HDFS-11293: [SPS]: Local DN should be given preference as source node, when target available in same node. Contributed by Yuanbo Liu and Uma Maheswara Rao G

Posted by ra...@apache.org.
HDFS-11293: [SPS]: Local DN should be given preference as source node, when target available in same node. Contributed by Yuanbo Liu and Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6289650e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6289650e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6289650e

Branch: refs/heads/HDFS-10285
Commit: 6289650e7cb3b4b35904ee50b8939c908df7b76b
Parents: 40ced2b
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jan 9 14:37:42 2017 -0800
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:54:50 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/StoragePolicySatisfier.java | 49 ++++++++++++--
 .../namenode/TestStoragePolicySatisfier.java    | 71 ++++++++++++++++++++
 2 files changed, 113 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
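The change below prefers a source datanode that can also host the expected storage type locally, so that the block can be moved between storages on the same node before any remote target is considered. A minimal sketch of that eligibility check, assuming dn, existing, and expectedStorageTypes are in scope as in the new helper added by this patch:

    // Sketch: a node qualifies for local movement only if it carries both an
    // existing replica storage type and one of the expected target types.
    boolean hasExisting = false;
    boolean hasExpected = false;
    for (DatanodeStorageInfo info : dn.getStorageInfos()) {
      StorageType type = info.getStorageType();
      hasExisting |= existing.contains(type);
      hasExpected |= expectedStorageTypes.contains(type);
    }
    boolean localMovementPossible = hasExisting && hasExpected;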


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6289650e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index ee59617..b1b1464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -298,9 +298,25 @@ public class StoragePolicySatisfier implements Runnable {
           new ArrayList<StorageTypeNodePair>();
       List<DatanodeStorageInfo> existingBlockStorages =
           new ArrayList<DatanodeStorageInfo>(Arrays.asList(storages));
+      // If an expected type already exists on a source node, local movement
+      // would be possible, so let's find such sources first.
+      Iterator<DatanodeStorageInfo> iterator = existingBlockStorages.iterator();
+      while (iterator.hasNext()) {
+        DatanodeStorageInfo datanodeStorageInfo = iterator.next();
+        if (checkSourceAndTargetTypeExists(
+            datanodeStorageInfo.getDatanodeDescriptor(), existing,
+            expectedStorageTypes)) {
+          sourceWithStorageMap
+              .add(new StorageTypeNodePair(datanodeStorageInfo.getStorageType(),
+                  datanodeStorageInfo.getDatanodeDescriptor()));
+          iterator.remove();
+          existing.remove(datanodeStorageInfo.getStorageType());
+        }
+      }
+
+      // Let's find sources for existing types left.
       for (StorageType existingType : existing) {
-        Iterator<DatanodeStorageInfo> iterator =
-            existingBlockStorages.iterator();
+        iterator = existingBlockStorages.iterator();
         while (iterator.hasNext()) {
           DatanodeStorageInfo datanodeStorageInfo = iterator.next();
           StorageType storageType = datanodeStorageInfo.getStorageType();
@@ -317,7 +333,7 @@ public class StoragePolicySatisfier implements Runnable {
           findTargetsForExpectedStorageTypes(expectedStorageTypes);
 
       foundMatchingTargetNodesForBlock |= findSourceAndTargetToMove(
-          blockMovingInfos, blockInfo, existing, sourceWithStorageMap,
+          blockMovingInfos, blockInfo, sourceWithStorageMap,
           expectedStorageTypes, locsForExpectedStorageTypes);
     }
     return foundMatchingTargetNodesForBlock;
@@ -366,8 +382,6 @@ public class StoragePolicySatisfier implements Runnable {
    *          - list of block source and target node pair
    * @param blockInfo
    *          - Block
-   * @param existing
-   *          - Existing storage types of block
    * @param sourceWithStorageList
    *          - Source Datanode with storages list
    * @param expected
@@ -379,7 +393,6 @@ public class StoragePolicySatisfier implements Runnable {
    */
   private boolean findSourceAndTargetToMove(
       List<BlockMovingInfo> blockMovingInfos, BlockInfo blockInfo,
-      List<StorageType> existing,
       List<StorageTypeNodePair> sourceWithStorageList,
       List<StorageType> expected,
       StorageTypeNodeMap locsForExpectedStorageTypes) {
@@ -403,6 +416,7 @@ public class StoragePolicySatisfier implements Runnable {
         targetNodes.add(chosenTarget.dn);
         targetStorageTypes.add(chosenTarget.storageType);
         chosenNodes.add(chosenTarget.dn);
+        expected.remove(chosenTarget.storageType);
         // TODO: We can increment scheduled block count for this node?
       }
     }
@@ -442,16 +456,20 @@ public class StoragePolicySatisfier implements Runnable {
         targetNodes.add(chosenTarget.dn);
         targetStorageTypes.add(chosenTarget.storageType);
         chosenNodes.add(chosenTarget.dn);
+        expected.remove(chosenTarget.storageType);
         // TODO: We can increment scheduled block count for this node?
       } else {
         LOG.warn(
             "Failed to choose target datanode for the required"
                 + " storage types {}, block:{}, existing storage type:{}",
             expected, blockInfo, existingTypeNodePair.storageType);
-        foundMatchingTargetNodesForBlock = false;
       }
     }
 
+    if (expected.size() > 0) {
+      foundMatchingTargetNodesForBlock = false;
+    }
+
     blockMovingInfos.addAll(getBlockMovingInfos(blockInfo, sourceNodes,
         sourceStorageTypes, targetNodes, targetStorageTypes));
     return foundMatchingTargetNodesForBlock;
@@ -616,6 +634,23 @@ public class StoragePolicySatisfier implements Runnable {
     return max;
   }
 
+  private boolean checkSourceAndTargetTypeExists(DatanodeDescriptor dn,
+      List<StorageType> existing, List<StorageType> expectedStorageTypes) {
+    DatanodeStorageInfo[] allDNStorageInfos = dn.getStorageInfos();
+    boolean isExpectedTypeAvailable = false;
+    boolean isExistingTypeAvailable = false;
+    for (DatanodeStorageInfo dnInfo : allDNStorageInfos) {
+      StorageType storageType = dnInfo.getStorageType();
+      if (existing.contains(storageType)) {
+        isExistingTypeAvailable = true;
+      }
+      if (expectedStorageTypes.contains(storageType)) {
+        isExpectedTypeAvailable = true;
+      }
+    }
+    return isExistingTypeAvailable && isExpectedTypeAvailable;
+  }
+
   private static class StorageTypeNodeMap {
     private final EnumMap<StorageType, List<DatanodeDescriptor>> typeNodeMap =
         new EnumMap<StorageType, List<DatanodeDescriptor>>(StorageType.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6289650e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 718dbcb..9abb78d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -580,6 +580,77 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests moving block storage within the same datanode. Let's say we
+   * have DN1[DISK,ARCHIVE], DN2[DISK, SSD], DN3[DISK,RAM_DISK]; when the
+   * storage policy is set to ONE_SSD and satisfyStoragePolicy is requested,
+   * the block should move to DN2[SSD] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameDatanodeWithONESSD() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.RAM_DISK}};
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests moving block storage within the same datanode and to a remote node.
+   * Let's say we have DN1[DISK,ARCHIVE], DN2[ARCHIVE, SSD], DN3[DISK,DISK],
+   * DN4[DISK,DISK]; when the storage policy is set to WARM and
+   * satisfyStoragePolicy is requested, the block should move to DN1[ARCHIVE]
+   * and DN2[ARCHIVE] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.SSD},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK}};
+
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file);
+
+      // Change policy to WARM
+      dfs.setStoragePolicy(new Path(file), "WARM");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+
+      waitExpectedStorageType(file, StorageType.DISK, 1, 30000);
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 2, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: YARN-7052. RM SchedulingMonitor gives no indication why the spawned thread crashed. Contributed by Eric Payne

Posted by ra...@apache.org.
YARN-7052. RM SchedulingMonitor gives no indication why the spawned thread crashed. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39a9dc8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39a9dc8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39a9dc8e

Branch: refs/heads/HDFS-10285
Commit: 39a9dc8e4a6e1d13658867ad756878d3dd6352b0
Parents: 8455d70
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Aug 25 15:31:29 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Aug 25 15:31:29 2017 -0500

----------------------------------------------------------------------
 .../server/resourcemanager/monitor/SchedulingMonitor.java    | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
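The one-line change below widens the catch so that any unchecked error thrown by the preemption checker is logged and the periodic run continues, instead of silently killing the spawned monitor thread. A generic sketch of the same pattern outside YARN, assuming a periodic task on a ScheduledExecutorService; invokePolicy, monitorIntervalMs, and LOG are illustrative names, not part of this patch:

    // Sketch: keep a periodic task alive across unexpected errors. A scheduled
    // executor suppresses future runs if the task throws, so catch inside.
    ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
    ses.scheduleAtFixedRate(() -> {
      try {
        invokePolicy();  // the real work
      } catch (Throwable t) {
        // Log and skip this run; the next scheduled run still happens.
        LOG.error("Exception raised while executing preemption checker", t);
      }
    }, 0, monitorIntervalMs, TimeUnit.MILLISECONDS);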


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39a9dc8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 1e3f691..631d1a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -100,9 +100,11 @@ public class SchedulingMonitor extends AbstractService {
       try {
         //invoke the preemption policy
         invokePolicy();
-      } catch (YarnRuntimeException e) {
-        LOG.error("YarnRuntimeException raised while executing preemption"
-            + " checker, skip this run..., exception=", e);
+      } catch (Throwable t) {
+        // The preemption monitor does not alter structures nor do structures
+        // persist across invocations. Therefore, log, skip, and retry.
+        LOG.error("Exception raised while executing preemption"
+            + " checker, skip this run..., exception=", t);
       }
     }
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: HDFS-10954. [SPS]: Provide mechanism to send blocks movement result back to NN from coordinator DN. Contributed by Rakesh R

Posted by ra...@apache.org.
HDFS-10954. [SPS]: Provide mechanism to send blocks movement result back to NN from coordinator DN. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7130518e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7130518e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7130518e

Branch: refs/heads/HDFS-10285
Commit: 7130518ef30ae6a01e946f93d6a86eb5cafdc9a0
Parents: d5058b0
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Thu Nov 3 09:39:14 2016 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:50:19 2017 +0530

----------------------------------------------------------------------
 .../DatanodeProtocolClientSideTranslatorPB.java |  9 ++-
 .../DatanodeProtocolServerSideTranslatorPB.java |  4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 52 ++++++++++++++
 .../server/blockmanagement/BlockManager.java    |  4 ++
 .../hdfs/server/datanode/BPServiceActor.java    | 25 ++++++-
 .../datanode/StoragePolicySatisfyWorker.java    | 75 ++++++++++++++++----
 .../hdfs/server/namenode/FSNamesystem.java      | 10 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +-
 .../server/namenode/StoragePolicySatisfier.java | 23 ++++++
 .../protocol/BlocksStorageMovementResult.java   | 64 +++++++++++++++++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  5 +-
 .../src/main/proto/DatanodeProtocol.proto       | 14 ++++
 .../TestNameNodePrunesMissingStorages.java      |  3 +-
 .../datanode/InternalDataNodeTestUtils.java     |  4 +-
 .../server/datanode/TestBPOfferService.java     |  8 ++-
 .../hdfs/server/datanode/TestBlockRecovery.java |  4 +-
 .../server/datanode/TestDataNodeLifeline.java   |  7 +-
 .../TestDatanodeProtocolRetryPolicy.java        |  4 +-
 .../server/datanode/TestFsDatasetCache.java     |  4 +-
 .../TestStoragePolicySatisfyWorker.java         | 13 ++--
 .../hdfs/server/datanode/TestStorageReport.java |  4 +-
 .../server/namenode/NNThroughputBenchmark.java  |  9 +--
 .../hdfs/server/namenode/NameNodeAdapter.java   |  4 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |  5 +-
 .../namenode/TestStoragePolicySatisfier.java    | 50 +++++++++++++
 25 files changed, 368 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
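The patch below piggybacks per-trackID movement results on the existing datanode heartbeat. A minimal sketch of the result objects and their protobuf round trip, using only the constructors and helpers introduced in this change (the trackID values are illustrative; in practice the trackID is the inode id passed to satisfyStoragePolicy):

    // Sketch: a coordinator DN reports one finished and one failed trackID.
    BlocksStorageMovementResult[] results = new BlocksStorageMovementResult[] {
        new BlocksStorageMovementResult(1L, BlocksStorageMovementResult.Status.SUCCESS),
        new BlocksStorageMovementResult(2L, BlocksStorageMovementResult.Status.FAILURE)
    };
    // Serialize for the heartbeat request and decode again on the NameNode side.
    List<BlocksStorageMovementResultProto> protos =
        PBHelper.convertBlksMovResults(results);
    BlocksStorageMovementResult[] decoded = PBHelper.convertBlksMovResults(protos);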


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 9cc4516..9dd87d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -138,7 +139,8 @@ public class DatanodeProtocolClientSideTranslatorPB implements
       VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BlocksStorageMovementResult[] blksMovementResults) throws IOException {
     HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
         .setRegistration(PBHelper.convert(registration))
         .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
@@ -161,6 +163,11 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     if (slowDisks.haveSlowDisks()) {
       builder.addAllSlowDisks(PBHelper.convertSlowDiskInfo(slowDisks));
     }
+
+    // Add the blocks movement results to the heartbeat request.
+    builder.addAllBlksMovementResults(
+        PBHelper.convertBlksMovResults(blksMovementResults));
+
     HeartbeatResponseProto resp;
     try {
       resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 5cba284..40458ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -122,7 +122,9 @@ public class DatanodeProtocolServerSideTranslatorPB implements
           request.getXceiverCount(), request.getFailedVolumes(),
           volumeFailureSummary, request.getRequestFullBlockReportLease(),
           PBHelper.convertSlowPeerInfo(request.getSlowPeersList()),
-          PBHelper.convertSlowDiskInfo(request.getSlowDisksList()));
+          PBHelper.convertSlowDiskInfo(request.getSlowDisksList()),
+          PBHelper.convertBlksMovResults(
+              request.getBlksMovementResultsList()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 83f3454..156c9c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailur
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlocksStorageMovementResultProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -98,6 +99,8 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStr
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult.Status;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -952,6 +955,55 @@ public class PBHelper {
     return SlowDiskReports.create(slowDisksMap);
   }
 
+  public static BlocksStorageMovementResult[] convertBlksMovResults(
+      List<BlocksStorageMovementResultProto> protos) {
+    BlocksStorageMovementResult[] results =
+        new BlocksStorageMovementResult[protos.size()];
+    for (int i = 0; i < protos.size(); i++) {
+      BlocksStorageMovementResultProto resultProto = protos.get(i);
+      BlocksStorageMovementResult.Status status;
+      switch (resultProto.getStatus()) {
+      case SUCCESS:
+        status = Status.SUCCESS;
+        break;
+      case FAILURE:
+        status = Status.FAILURE;
+        break;
+      default:
+        throw new AssertionError("Unknown status: " + resultProto.getStatus());
+      }
+      results[i] = new BlocksStorageMovementResult(resultProto.getTrackID(),
+          status);
+    }
+    return results;
+  }
+
+  public static List<BlocksStorageMovementResultProto> convertBlksMovResults(
+      BlocksStorageMovementResult[] blocksMovementResults) {
+    List<BlocksStorageMovementResultProto> blocksMovementResultsProto =
+        new ArrayList<>();
+    BlocksStorageMovementResultProto.Builder builder =
+        BlocksStorageMovementResultProto.newBuilder();
+    for (int i = 0; i < blocksMovementResults.length; i++) {
+      BlocksStorageMovementResult report = blocksMovementResults[i];
+      builder.setTrackID(report.getTrackId());
+      BlocksStorageMovementResultProto.Status status;
+      switch (report.getStatus()) {
+      case SUCCESS:
+        status = BlocksStorageMovementResultProto.Status.SUCCESS;
+        break;
+      case FAILURE:
+        status = BlocksStorageMovementResultProto.Status.FAILURE;
+        break;
+      default:
+        throw new AssertionError("Unknown status: " + report.getStatus());
+      }
+      builder.setStatus(status);
+      blocksMovementResultsProto.add(builder.build());
+    }
+    return blocksMovementResultsProto;
+  }
+
   public static JournalInfo convert(JournalInfoProto info) {
     int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
     int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index fbf62c6..8cc09a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4852,4 +4852,8 @@ public class BlockManager implements BlockStatsMXBean {
   public void satisfyStoragePolicy(long id) {
     storageMovementNeeded.add(id);
   }
+
+  public StoragePolicySatisfier getStoragePolicySatisfier() {
+    return sps;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a94d2df..0f93fb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -511,6 +512,10 @@ class BPServiceActor implements Runnable {
         outliersReportDue && dn.getDiskMetrics() != null ?
             SlowDiskReports.create(dn.getDiskMetrics().getDiskOutliersStats()) :
             SlowDiskReports.EMPTY_REPORT;
+
+    BlocksStorageMovementResult[] blksMovementResults =
+        getBlocksMovementResults();
+
     HeartbeatResponse response = bpNamenode.sendHeartbeat(bpRegistration,
         reports,
         dn.getFSDataset().getCacheCapacity(),
@@ -521,15 +526,33 @@ class BPServiceActor implements Runnable {
         volumeFailureSummary,
         requestBlockReportLease,
         slowPeers,
-        slowDisks);
+        slowDisks,
+        blksMovementResults);
 
     if (outliersReportDue) {
       // If the report was due and successfully sent, schedule the next one.
       scheduler.scheduleNextOutlierReport();
     }
+
+    // Remove the blocks movement results after successfully transferring
+    // to namenode.
+    dn.getStoragePolicySatisfyWorker().getBlocksMovementsCompletionHandler()
+        .remove(blksMovementResults);
+
     return response;
   }
 
+  private BlocksStorageMovementResult[] getBlocksMovementResults() {
+    List<BlocksStorageMovementResult> trackIdVsMovementStatus = dn
+        .getStoragePolicySatisfyWorker().getBlocksMovementsCompletionHandler()
+        .getBlksMovementResults();
+    BlocksStorageMovementResult[] blksMovementResult =
+        new BlocksStorageMovementResult[trackIdVsMovementStatus.size()];
+    trackIdVsMovementStatus.toArray(blksMovementResult);
+
+    return blksMovementResult;
+  }
+
   @VisibleForTesting
   void sendLifelineForTests() throws IOException {
     lifelineSender.sendLifeline();

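A standalone sketch (not BPServiceActor itself) of the report-then-acknowledge pattern the hunk above follows: pending results are snapshotted, piggybacked on the heartbeat, and removed only after the RPC succeeds, so a failed heartbeat leaves them queued for the next attempt. All names below are illustrative:

    import java.util.ArrayList;
    import java.util.List;

    public class HeartbeatPiggybackSketch {
      private final List<Long> pendingTrackIds = new ArrayList<>();

      synchronized void report(long trackId) {
        pendingTrackIds.add(trackId);
      }

      void heartbeat() {
        List<Long> snapshot;
        synchronized (this) {
          snapshot = new ArrayList<>(pendingTrackIds);
        }
        // Stand-in for DatanodeProtocol#sendHeartbeat carrying the results.
        boolean sent = sendToNamenode(snapshot);
        if (sent) {
          synchronized (this) {
            // Drop only what the namenode has actually received.
            pendingTrackIds.removeAll(snapshot);
          }
        }
      }

      private boolean sendToNamenode(List<Long> results) {
        return true; // stubbed RPC
      }
    }
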
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 604fb4a..1bd851e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -29,6 +29,7 @@ import java.io.OutputStream;
 import java.net.Socket;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.Callable;
@@ -57,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
@@ -218,7 +220,8 @@ public class StoragePolicySatisfyWorker {
         OutputStream unbufOut = sock.getOutputStream();
         InputStream unbufIn = sock.getInputStream();
         Token<BlockTokenIdentifier> accessToken = datanode.getBlockAccessToken(
-            extendedBlock, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
+            extendedBlock, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
+            new StorageType[]{targetStorageType}, new String[0]);
 
         DataEncryptionKeyFactory keyFactory = datanode
             .getDataEncryptionKeyFactoryForBlock(extendedBlock);
@@ -257,7 +260,7 @@ public class StoragePolicySatisfyWorker {
         Token<BlockTokenIdentifier> accessToken, DatanodeInfo srcDn,
         StorageType destinStorageType) throws IOException {
       new Sender(out).replaceBlock(eb, destinStorageType, accessToken,
-          srcDn.getDatanodeUuid(), srcDn);
+          srcDn.getDatanodeUuid(), srcDn, null);
     }
 
     /** Receive a reportedBlock copy response from the input stream. */
@@ -276,7 +279,7 @@ public class StoragePolicySatisfyWorker {
   /**
    * Block movement status code.
    */
-  enum BlockMovementStatus {
+  public static enum BlockMovementStatus {
     /** Success. */
     DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
     /**
@@ -343,26 +346,72 @@ public class StoragePolicySatisfyWorker {
 
   /**
    * Blocks movements completion handler, which is used to collect details of
-   * the completed list of block movements and notify the namenode about the
-   * success or failures.
+   * the completed block movements and their status (success or failure),
+   * which will be sent to the namenode via heartbeat.
    */
   static class BlocksMovementsCompletionHandler {
-    private final List<BlockMovementResult> completedBlocks = new ArrayList<>();
+    private final List<BlocksStorageMovementResult> trackIdVsMovementStatus =
+        new ArrayList<>();
 
     /**
-     * Collect all the block movement results and notify namenode.
+     * Collect all the block movement results. Later these will be sent to
+     * the namenode via heartbeat.
      *
      * @param results
      *          result of all the block movements per trackId
      */
-    void handle(List<BlockMovementResult> results) {
-      completedBlocks.addAll(results);
-      // TODO: notify namenode about the success/failures.
+    void handle(List<BlockMovementResult> resultsPerTrackId) {
+      BlocksStorageMovementResult.Status status =
+          BlocksStorageMovementResult.Status.SUCCESS;
+      long trackId = -1;
+      for (BlockMovementResult blockMovementResult : resultsPerTrackId) {
+        trackId = blockMovementResult.getTrackId();
+        if (blockMovementResult.status ==
+            BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE) {
+          status = BlocksStorageMovementResult.Status.FAILURE;
+          // If any of the block movements failed, mark the trackId as a
+          // failure so that the namenode can decide to retry the blocks
+          // associated with it.
+          break;
+        }
+      }
+
+      // Add to the tracking results list. Later this will be sent to the
+      // namenode via the datanode heartbeat.
+      synchronized (trackIdVsMovementStatus) {
+        trackIdVsMovementStatus.add(
+            new BlocksStorageMovementResult(trackId, status));
+      }
+    }
+
+    /**
+     * @return unmodifiable list of blocks storage movement results.
+     */
+    List<BlocksStorageMovementResult> getBlksMovementResults() {
+      synchronized (trackIdVsMovementStatus) {
+        if (trackIdVsMovementStatus.size() <= 0) {
+          return new ArrayList<>();
+        }
+        List<BlocksStorageMovementResult> results = Collections
+            .unmodifiableList(trackIdVsMovementStatus);
+        return results;
+      }
     }
 
-    @VisibleForTesting
-    List<BlockMovementResult> getCompletedBlocks() {
-      return completedBlocks;
+    /**
+     * Remove the blocks storage movement results.
+     *
+     * @param results
+     *          set of blocks storage movement results
+     */
+    void remove(BlocksStorageMovementResult[] results) {
+      if (results != null) {
+        synchronized (trackIdVsMovementStatus) {
+          for (BlocksStorageMovementResult blocksMovementResult : results) {
+            trackIdVsMovementStatus.remove(blocksMovementResult);
+          }
+        }
+      }
     }
   }
 

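The handle() method above reduces the per-block results of one trackId to a single status. A self-contained restatement of that aggregation rule (not the Hadoop class itself; the enum and method names are illustrative):

    import java.util.Arrays;
    import java.util.List;

    public class TrackIdAggregationSketch {
      enum BlockStatus { SUCCESS, FAILURE }

      // One failed block movement marks the whole trackId as FAILURE so the
      // namenode can decide to retry it; otherwise it is SUCCESS.
      static String aggregate(long trackId, List<BlockStatus> perBlock) {
        for (BlockStatus s : perBlock) {
          if (s == BlockStatus.FAILURE) {
            return trackId + " -> FAILURE";
          }
        }
        return trackId + " -> SUCCESS";
      }

      public static void main(String[] args) {
        System.out.println(aggregate(7L,
            Arrays.asList(BlockStatus.SUCCESS, BlockStatus.FAILURE))); // FAILURE
        System.out.println(aggregate(8L,
            Arrays.asList(BlockStatus.SUCCESS, BlockStatus.SUCCESS))); // SUCCESS
      }
    }
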
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 12d96d8..d08a3fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -262,6 +262,7 @@ import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -3732,7 +3733,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BlocksStorageMovementResult[] blksMovementResults) throws IOException {
     readLock();
     try {
       //get datanode commands
@@ -3746,6 +3748,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (requestFullBlockReportLease) {
         blockReportLeaseId =  blockManager.requestBlockReportLeaseId(nodeReg);
       }
+
+      // TODO: Handle blocks movement results send by the coordinator datanode.
+      // This has to be revisited as part of HDFS-11029.
+      blockManager.getStoragePolicySatisfier()
+          .handleBlocksStorageMovementResults(blksMovementResults);
+
       //create ha status
       final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
           haContext.getState().getServiceState(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 3fbb7bd..20a5783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -150,6 +150,7 @@ import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1459,13 +1460,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
       int failedVolumes, VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
     checkNNStartup();
     verifyRequest(nodeReg);
     return namesystem.handleHeartbeat(nodeReg, report,
         dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
         failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
-        slowPeers, slowDisks);
+        slowPeers, slowDisks,
+        blkMovementStatus);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index b5aed37..fbe686a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -39,11 +39,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Setting storagePolicy on a file after the file write will only update the new
  * storage policy type in Namespace, but physical block storage movement will
@@ -394,4 +397,24 @@ public class StoragePolicySatisfier implements Runnable {
       return typeNodeMap.get(type);
     }
   }
+
+  // TODO: Temporarily keeping the results for assertion. This has to be
+  // revisited as part of HDFS-11029.
+  @VisibleForTesting
+  List<BlocksStorageMovementResult> results = new ArrayList<>();
+
+  /**
+   * Receives the movement results of the collection of blocks associated
+   * with a trackId.
+   *
+   * @param blksMovementResults
+   *          movement status of the set of blocks associated with a trackId.
+   */
+  void handleBlocksStorageMovementResults(
+      BlocksStorageMovementResult[] blksMovementResults) {
+    if (blksMovementResults.length <= 0) {
+      return;
+    }
+    results.addAll(Arrays.asList(blksMovementResults));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
new file mode 100644
index 0000000..1afba34
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksStorageMovementResult.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+/**
+ * This class represents the movement status of a set of blocks associated
+ * with a track Id.
+ */
+public class BlocksStorageMovementResult {
+
+  private final long trackId;
+  private final Status status;
+
+  /**
+   * SUCCESS - If all the blocks associated with the track id have moved
+   * successfully, or the maximum possible movements have been done.
+   *
+   * <p>
+   * FAILURE - If any of the trackId's block movements failed and the failed
+   * movements need to be retried. For example, a selected target node is no
+   * longer running or has no space left; retrying with a newly selected
+   * target node might work.
+   */
+  public static enum Status {
+    SUCCESS, FAILURE;
+  }
+
+  /**
+   * BlocksStorageMovementResult constructor.
+   *
+   * @param trackId
+   *          tracking identifier
+   * @param status
+   *          block movement status
+   */
+  public BlocksStorageMovementResult(long trackId, Status status) {
+    this.trackId = trackId;
+    this.status = status;
+  }
+
+  public long getTrackId() {
+    return trackId;
+  }
+
+  public Status getStatus() {
+    return status;
+  }
+
+}
\ No newline at end of file

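A minimal usage sketch of the new value class above (the example class name is made up; only the constructor and getters shown in the file are used):

    import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;

    public class BlocksStorageMovementResultExample {
      public static void main(String[] args) {
        BlocksStorageMovementResult ok = new BlocksStorageMovementResult(
            100L, BlocksStorageMovementResult.Status.SUCCESS);
        BlocksStorageMovementResult retry = new BlocksStorageMovementResult(
            101L, BlocksStorageMovementResult.Status.FAILURE);
        // The namenode keys retries off the trackId when the status is FAILURE.
        System.out.println(ok.getTrackId() + " -> " + ok.getStatus());
        System.out.println(retry.getTrackId() + " -> " + retry.getStatus());
      }
    }
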
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 283f367..858f59b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -111,6 +111,8 @@ public interface DatanodeProtocol {
    * @param slowPeers Details of peer DataNodes that were detected as being
    *                  slow to respond to packet writes. Empty report if no
    *                  slow peers were detected by the DataNode.
+   * @param blksMovementResults array of movement statuses of the sets of
+   *                            blocks, one per trackId.
    * @throws IOException on error
    */
   @Idempotent
@@ -124,7 +126,8 @@ public interface DatanodeProtocol {
                                        VolumeFailureSummary volumeFailureSummary,
                                        boolean requestFullBlockReportLease,
                                        @Nonnull SlowPeerReports slowPeers,
-                                       @Nonnull SlowDiskReports slowDisks)
+                                       @Nonnull SlowDiskReports slowDisks,
+                                       BlocksStorageMovementResult[] blksMovementResults)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 8e19809..77b0f86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -177,6 +177,18 @@ message BlockStorageMovementProto {
 }
 
 /**
+ * Movement status of the set of blocks associated with a trackId.
+ */
+message BlocksStorageMovementResultProto {
+  enum Status {
+    SUCCESS = 1; // block movement succeeded
+    FAILURE = 2; // block movement failed and needs to retry
+  }
+  required uint64 trackID = 1;
+  required Status status = 2;
+}
+
+/**
  * registration - Information of the datanode registering with the namenode
  */
 message RegisterDatanodeRequestProto {
@@ -219,6 +231,7 @@ message VolumeFailureSummaryProto {
  * volumeFailureSummary - info about volume failures
  * slowPeers - info about peer DataNodes that are suspected to be slow.
  * slowDisks - info about DataNode disks that are suspected to be slow.
+ * blksMovementResults - status of the scheduled blocks movements
  */
 message HeartbeatRequestProto {
   required DatanodeRegistrationProto registration = 1; // Datanode info
@@ -232,6 +245,7 @@ message HeartbeatRequestProto {
   optional bool requestFullBlockReportLease = 9 [ default = false ];
   repeated SlowPeerReportProto slowPeers = 10;
   repeated SlowDiskReportProto slowDisks = 11;
+  repeated BlocksStorageMovementResultProto blksMovementResults = 12;
 }
 
 /**

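A hedged sketch of constructing the new message from Java, assuming the generated builder is in scope (setter names follow the .proto field names above):

    // Generated-builder usage; this is a fragment, not a full class.
    BlocksStorageMovementResultProto resultProto =
        BlocksStorageMovementResultProto.newBuilder()
            .setTrackID(7L)
            .setStatus(BlocksStorageMovementResultProto.Status.FAILURE)
            .build();
    // The heartbeat request carries these as a repeated field:
    //   repeated BlocksStorageMovementResultProto blksMovementResults = 12;
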
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 948a8fb..2d58732 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -116,7 +117,7 @@ public class TestNameNodePrunesMissingStorages {
       cluster.stopDataNode(0);
       cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
           0, null, true, SlowPeerReports.EMPTY_REPORT,
-          SlowDiskReports.EMPTY_REPORT);
+          SlowDiskReports.EMPTY_REPORT, new BlocksStorageMovementResult[0]);
 
       // Check that the missing storage was pruned.
       assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
index 876a854..33cf391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
@@ -140,7 +141,8 @@ public class InternalDataNodeTestUtils {
             Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
             Mockito.anyBoolean(),
             Mockito.any(SlowPeerReports.class),
-            Mockito.any(SlowDiskReports.class))).thenReturn(
+            Mockito.any(SlowDiskReports.class),
+            Mockito.any(BlocksStorageMovementResult[].class))).thenReturn(
         new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat(
             HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current()
             .nextLong() | 1L));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index ec19926..524b8b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -122,6 +123,8 @@ public class TestBPOfferService {
     Mockito.doReturn(new DNConf(mockDn)).when(mockDn).getDnConf();
     Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
         .when(mockDn).getMetrics();
+    Mockito.doReturn(new StoragePolicySatisfyWorker(conf, mockDn)).when(mockDn)
+        .getStoragePolicySatisfyWorker();
 
     // Set up a simulated dataset with our fake BP
     mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
@@ -156,7 +159,8 @@ public class TestBPOfferService {
           Mockito.any(VolumeFailureSummary.class),
           Mockito.anyBoolean(),
           Mockito.any(SlowPeerReports.class),
-          Mockito.any(SlowDiskReports.class));
+          Mockito.any(SlowDiskReports.class),
+          Mockito.any(BlocksStorageMovementResult[].class));
     mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
     datanodeCommands[nnIdx] = new DatanodeCommand[0];
     return mock;
@@ -375,6 +379,8 @@ public class TestBPOfferService {
     Mockito.doReturn(new DNConf(mockDn)).when(mockDn).getDnConf();
     Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
       when(mockDn).getMetrics();
+    Mockito.doReturn(new StoragePolicySatisfyWorker(conf, mockDn)).when(mockDn)
+        .getStoragePolicySatisfyWorker();
     final AtomicInteger count = new AtomicInteger();
     Mockito.doAnswer(new Answer<Void>() {
       @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 311d5a6..02e8dc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -221,7 +222,8 @@ public class TestBlockRecovery {
             Mockito.any(VolumeFailureSummary.class),
             Mockito.anyBoolean(),
             Mockito.any(SlowPeerReports.class),
-            Mockito.any(SlowDiskReports.class)))
+            Mockito.any(SlowDiskReports.class),
+            Mockito.any(BlocksStorageMovementResult[].class)))
         .thenReturn(new HeartbeatResponse(
             new DatanodeCommand[0],
             new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
index 28427bc..b15b530 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
@@ -172,7 +173,8 @@ public class TestDataNodeLifeline {
             any(VolumeFailureSummary.class),
             anyBoolean(),
             any(SlowPeerReports.class),
-            any(SlowDiskReports.class));
+            any(SlowDiskReports.class),
+            any(BlocksStorageMovementResult[].class));
 
     // Intercept lifeline to trigger latch count-down on each call.
     doAnswer(new LatchCountingAnswer<Void>(lifelinesSent))
@@ -237,7 +239,8 @@ public class TestDataNodeLifeline {
             any(VolumeFailureSummary.class),
             anyBoolean(),
             any(SlowPeerReports.class),
-            any(SlowDiskReports.class));
+            any(SlowDiskReports.class),
+            any(BlocksStorageMovementResult[].class));
 
     // While waiting on the latch for the expected number of heartbeat messages,
     // poll DataNode tracking information.  We expect that the DataNode always

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index bb1d9ef..d7ac3f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
@@ -222,7 +223,8 @@ public class TestDatanodeProtocolRetryPolicy {
            Mockito.any(VolumeFailureSummary.class),
            Mockito.anyBoolean(),
            Mockito.any(SlowPeerReports.class),
-           Mockito.any(SlowDiskReports.class));
+           Mockito.any(SlowDiskReports.class),
+           Mockito.any(BlocksStorageMovementResult[].class));
 
     dn = new DataNode(conf, locations, null, null) {
       @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 2dbd5b9..b9f21a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.Page
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -208,7 +209,8 @@ public class TestFsDatasetCache {
           (StorageReport[]) any(), anyLong(), anyLong(),
           anyInt(), anyInt(), anyInt(), (VolumeFailureSummary) any(),
           anyBoolean(), any(SlowPeerReports.class),
-          any(SlowDiskReports.class));
+          any(SlowDiskReports.class),
+          (BlocksStorageMovementResult[]) any());
     } finally {
       lock.writeLock().unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
index ea3eec3..1eb44e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
@@ -34,10 +34,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
-import org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -191,12 +190,12 @@ public class TestStoragePolicySatisfyWorker {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        List<BlockMovementResult> completedBlocks = worker
-            .getBlocksMovementsCompletionHandler().getCompletedBlocks();
+        List<BlocksStorageMovementResult> completedBlocks = worker
+            .getBlocksMovementsCompletionHandler().getBlksMovementResults();
         int failedCount = 0;
-        for (BlockMovementResult blockMovementResult : completedBlocks) {
-          if (BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE ==
-              blockMovementResult.getStatus()) {
+        for (BlocksStorageMovementResult blkMovementResult : completedBlocks) {
+          if (blkMovementResult.getStatus() ==
+              BlocksStorageMovementResult.Status.FAILURE) {
             failedCount++;
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index 5f62ddb..df120ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -110,7 +111,8 @@ public class TestStorageReport {
         anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
         Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean(),
         Mockito.any(SlowPeerReports.class),
-        Mockito.any(SlowDiskReports.class));
+        Mockito.any(SlowDiskReports.class),
+        Mockito.any(BlocksStorageMovementResult[].class));
 
     StorageReport[] reports = captor.getValue();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 3a3c471..1e016f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -956,8 +957,8 @@ public class NNThroughputBenchmark implements Tool {
           DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0L) };
       DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
           0L, 0L, 0, 0, 0, null, true,
-          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-          .getCommands();
+          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+          new BlocksStorageMovementResult[0]).getCommands();
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -1007,8 +1008,8 @@ public class NNThroughputBenchmark implements Tool {
           false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0) };
       DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
           rep, 0L, 0L, 0, 0, 0, null, true,
-          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-          .getCommands();
+          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+          new BlocksStorageMovementResult[0]).getCommands();
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 242e8f5..ba29c82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -126,7 +127,8 @@ public class NameNodeAdapter {
     return namesystem.handleHeartbeat(nodeReg,
         BlockManagerTestUtil.getStorageReportsForDatanode(dd),
         dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0, null, true,
-        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+        new BlocksStorageMovementResult[0]);
   }
 
   public static boolean setReplication(final FSNamesystem ns,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 74be90c..87b8e79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -139,8 +140,8 @@ public class TestDeadDatanode {
         false, 0, 0, 0, 0, 0) };
     DatanodeCommand[] cmd =
         dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true,
-            SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-            .getCommands();
+            SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+            new BlocksStorageMovementResult[0]).getCommands();
     assertEquals(1, cmd.length);
     assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7130518e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 37664b5..cbfdfc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -146,6 +148,54 @@ public class TestStoragePolicySatisfier {
     }
   }
 
+  /**
+   * Tests to verify that the block storage movement results will be propagated
+   * to the Namenode via the datanode heartbeat.
+   */
+  @Test(timeout = 300000)
+  public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
+    try {
+      // Change policy to ONE_SSD
+      distributedFS.setStoragePolicy(new Path(file), "ONE_SSD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD-based nodes are added to the cluster by adding
+      // SSD-based datanodes.
+      startAdditionalDNs(config, 1, numOfDatanodes, newtypes,
+          storagesPerDatanode, capacity, hdfsCluster);
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+
+      // Wait till the block is moved to SSD areas
+      waitExpectedStorageType(file, StorageType.SSD, distributedFS, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, distributedFS, 2, 30000);
+
+      // TODO: Temporarily using the results from StoragePolicySatisfier class.
+      // This has to be revisited as part of HDFS-11029.
+      waitForBlocksMovementResult(1, 30000);
+    } finally {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  private void waitForBlocksMovementResult(int expectedResultsCount,
+      int timeout) throws TimeoutException, InterruptedException {
+    BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
+    final StoragePolicySatisfier sps = blockManager.getStoragePolicySatisfier();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LOG.info("expectedResultsCount={} actualResultsCount={}",
+            expectedResultsCount, sps.results.size());
+        return expectedResultsCount == sps.results.size();
+      }
+    }, 100, timeout);
+  }
+
   private void writeContent(final DistributedFileSystem dfs,
       final String fileName) throws IOException {
     // write to DISK

