Posted to common-commits@hadoop.apache.org by ra...@apache.org on 2017/08/27 07:15:59 UTC

[32/50] [abbrv] hadoop git commit: HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is running. Contributed by Wei Zhou

HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is running. Contributed by Wei Zhou
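In short, the change makes the StoragePolicySatisfier (SPS) treat the Mover as running only when the Mover ID file is held open for write (i.e. a valid write lease exists on it), rather than whenever the file merely exists. A Mover that exits ungracefully can leave the ID file behind without holding a lease, and the old existence check then kept SPS disabled indefinitely. The essence of the change, condensed from the hunks below as a rough sketch (the namesystem field and its enclosing class are assumed from the diff, not spelled out here):

    String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();

    // Old check (removed in this commit): any existing MOVER_ID file counted as a
    // running Mover, so a file left behind by a crashed Mover kept SPS disabled.
    //   boolean moverRunning = namesystem.getFSDirectory()
    //       .getINode(moverId, FSDirectory.DirOp.READ) != null;

    // New check (added in this commit): the Mover keeps MOVER_ID_PATH open for
    // write while it runs, so a live write lease indicates a running Mover.
    boolean moverRunning = namesystem.isFileOpenedForWrite(moverId);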


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34ba6df6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34ba6df6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34ba6df6

Branch: refs/heads/HDFS-10285
Commit: 34ba6df6131fa3821924c0fd81a467a582da14fb
Parents: 69d8ccc
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Fri Feb 17 20:49:38 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:55:20 2017 +0530

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSNamesystem.java      |  17 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   7 ++
 .../server/namenode/StoragePolicySatisfier.java |  19 +---
 .../namenode/TestStoragePolicySatisfier.java    | 108 +++++++++++++++----
 4 files changed, 113 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3cb47b9..b3663a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3437,7 +3437,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   BlockInfo getStoredBlock(Block block) {
     return blockManager.getStoredBlock(block);
   }
-  
+
+  @Override
+  public boolean isFileOpenedForWrite(String path) {
+    readLock();
+    try {
+      INode inode = dir.getINode(path, FSDirectory.DirOp.READ);
+      INodeFile iNodeFile = INodeFile.valueOf(inode, path);
+      LeaseManager.Lease lease = leaseManager.getLease(iNodeFile);
+      return lease != null;
+    } catch (IOException e) {
+      return false;
+    } finally {
+      readUnlock();
+    }
+  }
+
   @Override
   public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index e07376b..a2b07ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -45,4 +45,11 @@ public interface Namesystem extends RwLock, SafeMode {
    *         middle of the starting active services.
    */
   boolean inTransitionToActive();
+
+  /**
+   * Check whether the given file is currently opened for write.
+   * @param filePath path of the file to check
+   * @return true if a valid write lease exists on the file, otherwise false.
+   */
+  boolean isFileOpenedForWrite(String filePath);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index dc58294..29c8a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -151,19 +150,8 @@ public class StoragePolicySatisfier implements Runnable {
 
   // Return true if a Mover instance is running
   private boolean checkIfMoverRunning() {
-    boolean ret = false;
-    try {
-      String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
-      INode inode = namesystem.getFSDirectory().getINode(
-          moverId, FSDirectory.DirOp.READ);
-      if (inode != null) {
-        ret = true;
-      }
-    } catch (IOException e) {
-      LOG.info("StoragePolicySatisfier is enabled as no Mover ID file found.");
-      ret = false;
-    }
-    return ret;
+    String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
+    return namesystem.isFileOpenedForWrite(moverId);
   }
 
   @Override
@@ -177,7 +165,8 @@ public class StoragePolicySatisfier implements Runnable {
         this.storageMovementsMonitor.stop();
         LOG.error(
             "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-                + HdfsServerConstants.MOVER_ID_PATH.toString() + " exists");
+                + HdfsServerConstants.MOVER_ID_PATH.toString()
+                + " been opened. Maybe a Mover instance is running!");
         return;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ba6df6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index de73e8b..2a33455 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -97,29 +98,33 @@ public class TestStoragePolicySatisfier {
 
     try {
       createCluster();
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file), COLD);
-      FSNamesystem namesystem = hdfsCluster.getNamesystem();
-      INode inode = namesystem.getFSDirectory().getINode(file);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
-      startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
-          storagesPerDatanode, capacity, hdfsCluster);
-
-      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
-
-      hdfsCluster.triggerHeartbeats();
-      // Wait till namenode notified about the block location details
-      DFSTestUtil.waitExpectedStorageType(
-          file, StorageType.ARCHIVE, 3, 30000, dfs);
+      doTestWhenStoragePolicySetToCOLD();
     } finally {
       shutdownCluster();
     }
   }
 
+  private void doTestWhenStoragePolicySetToCOLD() throws Exception {
+    // Change policy to COLD
+    dfs.setStoragePolicy(new Path(file), COLD);
+    FSNamesystem namesystem = hdfsCluster.getNamesystem();
+    INode inode = namesystem.getFSDirectory().getINode(file);
+
+    StorageType[][] newtypes =
+        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+    startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
+        storagesPerDatanode, capacity, hdfsCluster);
+
+    namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+
+    hdfsCluster.triggerHeartbeats();
+    // Wait till the namenode is notified about the block location details
+    DFSTestUtil.waitExpectedStorageType(
+        file, StorageType.ARCHIVE, 3, 30000, dfs);
+  }
+
   @Test(timeout = 300000)
   public void testWhenStoragePolicySetToALLSSD()
       throws Exception {
@@ -500,19 +505,78 @@ public class TestStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
+      throws Exception {
+    boolean running;
+    FSDataOutputStream out = null;
+    try {
+      createCluster();
+      // Stop SPS
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertFalse("SPS should stopped as configured.", running);
+
+      // Simulate a running Mover by creating the MOVER_ID file and holding it open
+      out = hdfsCluster.getFileSystem().create(
+          HdfsServerConstants.MOVER_ID_PATH);
+
+      // Restart SPS
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertFalse("SPS should not be able to run as file "
+          + HdfsServerConstants.MOVER_ID_PATH + " is being hold.", running);
+
+      // Simulate a Mover exit by closing and deleting the MOVER_ID file
+      out.close();
+      out = null;
+      hdfsCluster.getFileSystem().delete(
+          HdfsServerConstants.MOVER_ID_PATH, true);
+
+      // Restart SPS again
+      hdfsCluster.getNameNode().reconfigurePropertyImpl(
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+      running = hdfsCluster.getFileSystem()
+          .getClient().isStoragePolicySatisfierRunning();
+      Assert.assertTrue("SPS should be running as "
+          + "Mover already exited", running);
+
+      // Check functionality after SPS restart
+      doTestWhenStoragePolicySetToCOLD();
+    } catch (ReconfigurationException e) {
+      throw new IOException("Exception when reconfigure "
+          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, e);
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Test to verify that SPS is able to start when the Mover ID file
+   * is not held by a Mover. This can happen when the Mover exits
+   * ungracefully without deleting the ID file from HDFS.
+   */
+  @Test(timeout = 300000)
+  public void testWhenMoverExitsWithoutDeleteMoverIDFile()
       throws IOException {
     try {
       createCluster();
-      // Simulate Mover by creating MOVER_ID file
+      // Simulate an ungraceful Mover exit by creating the MOVER_ID file without holding it open
       DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
           HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
       hdfsCluster.restartNameNode(true);
       boolean running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
-      Assert.assertFalse("SPS should not start "
-          + "when a Mover instance is running", running);
+      Assert.assertTrue("SPS should be running as "
+          + "no Mover really running", running);
     } finally {
-      shutdownCluster();
+      hdfsCluster.shutdown();
     }
   }
 

