Posted to common-commits@hadoop.apache.org by um...@apache.org on 2016/06/22 18:18:37 UTC

hadoop git commit: HDFS-10473: Allow only suitable storage policies to be set on striped files. Contributed by Uma Maheswara Rao G

Repository: hadoop
Updated Branches:
  refs/heads/trunk 4ee354362 -> 17eae9ebb


HDFS-10473: Allow only suitable storage policies to be set on striped files. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17eae9eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17eae9eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17eae9eb

Branch: refs/heads/trunk
Commit: 17eae9ebb30a3b106c4f6ae0c5374a3ab83abd8a
Parents: 4ee3543
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Jun 22 11:17:43 2016 -0700
Committer: Uma Maheswara Rao G <um...@intel.com>
Committed: Wed Jun 22 11:17:43 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 15 +++-
 .../namenode/ErasureCodingPolicyManager.java    | 20 +++++
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 19 ++++-
 .../hadoop/hdfs/server/mover/TestMover.java     | 32 +++++++
 .../server/namenode/TestStripedINodeFile.java   | 87 +++++++++++++++++++-
 5 files changed, 170 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index b473a4d..cd37b15b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.Matcher;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -386,7 +387,19 @@ public class Mover {
         }
         LocatedBlock lb = lbs.get(i);
         if (lb.isStriped()) {
-          types = policy.chooseStorageTypes((short) lb.getLocations().length);
+          if (ErasureCodingPolicyManager
+              .checkStoragePolicySuitableForECStripedMode(policyId)) {
+            types = policy.chooseStorageTypes((short) lb.getLocations().length);
+          } else {
+            // Currently only a limited set of policies (HOT, COLD, ALL_SSD)
+            // is supported for EC striped files.
+            // If the storage policy is not in that set, the Mover skips
+            // the blocks of this file rather than moving them.
+            LOG.warn("The storage policy " + policy.getName()
+                + " is not suitable for striped EC files, "
+                + "so skipping block movement");
+            return;
+          }
         }
         final StorageTypeDiff diff = new StorageTypeDiff(types,
             lb.getStorageTypes());
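
For readers following the patch, the branch added above reduces to the small helper sketched below. This is an illustrative restatement, not code from the commit: the class and method names are hypothetical, while chooseStorageTypes() and checkStoragePolicySuitableForECStripedMode() are the APIs the patch actually calls.

    import java.util.List;

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    // Hypothetical restatement of the new Mover decision for striped blocks.
    class StripedMoveDecisionSketch {
      // Returns the target storage types, or null when the block should be
      // left alone because the policy is unsupported for striped files.
      static List<StorageType> typesForStripedBlock(BlockStoragePolicy policy,
          byte policyId, LocatedBlock lb) {
        if (ErasureCodingPolicyManager
            .checkStoragePolicySuitableForECStripedMode(policyId)) {
          // HOT, COLD and ALL_SSD: choose target types as before.
          return policy.chooseStorageTypes((short) lb.getLocations().length);
        }
        // Any other policy on a striped file: skip the move entirely.
        return null;
      }
    }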

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index eaf63f9..c4bc8de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -53,6 +53,11 @@ public final class ErasureCodingPolicyManager {
   private static final ErasureCodingPolicy[] SYS_POLICIES =
       new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3};
 
+  // Supported storage policies for striped EC files
+  private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = new byte[] {
+      HdfsConstants.HOT_STORAGE_POLICY_ID, HdfsConstants.COLD_STORAGE_POLICY_ID,
+      HdfsConstants.ALLSSD_STORAGE_POLICY_ID };
+
   /**
    * All active policies maintained in NN memory for fast querying,
    * identified and sorted by its name.
@@ -121,6 +126,21 @@ public final class ErasureCodingPolicyManager {
   }
 
   /**
+   * @return true if the given storage policy is suitable for striped EC files.
+   */
+  public static boolean checkStoragePolicySuitableForECStripedMode(
+      byte storagePolicyID) {
+    boolean isPolicySuitable = false;
+    for (byte suitablePolicy : SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE) {
+      if (storagePolicyID == suitablePolicy) {
+        isPolicySuitable = true;
+        break;
+      }
+    }
+    return isPolicySuitable;
+  }
+
+  /**
    * Clear and clean up
    */
   public void clear() {
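
As a quick illustration of the new predicate (the driver class below is hypothetical; HOT_STORAGE_POLICY_ID is referenced by the patch, and ONESSD_STORAGE_POLICY_ID is assumed to be defined alongside it in HdfsConstants):

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    // Hypothetical driver showing which policies pass the new check.
    public class PolicyCheckSketch {
      public static void main(String[] args) {
        // Expected: true -- HOT is in the supported set {HOT, COLD, ALL_SSD}.
        System.out.println(ErasureCodingPolicyManager
            .checkStoragePolicySuitableForECStripedMode(
                HdfsConstants.HOT_STORAGE_POLICY_ID));
        // Expected: false -- ONE_SSD is outside the supported set.
        System.out.println(ErasureCodingPolicyManager
            .checkStoragePolicySuitableForECStripedMode(
                HdfsConstants.ONESSD_STORAGE_POLICY_ID));
      }
    }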

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 5c10c86..63945a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -496,9 +497,25 @@ public class INodeFile extends INodeWithAdditionalFields
   public byte getStoragePolicyID() {
     byte id = getLocalStoragePolicyID();
     if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
-      return this.getParent() != null ?
+      id = this.getParent() != null ?
           this.getParent().getStoragePolicyID() : id;
     }
+
+    // For striped EC files, only suitable policies (currently HOT, COLD
+    // and ALL_SSD) are honored.
+    // If the file carries any other policy, it is treated as
+    // BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
+    if (isStriped() && id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
+        && !ErasureCodingPolicyManager
+            .checkStoragePolicySuitableForECStripedMode(id)) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("The current effective storage policy id: " + id
+            + " is not suitable for striped EC file: " + getName()
+            + ", so returning the unspecified storage policy id");
+      }
+      id = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+    }
+
     return id;
   }
 
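The fallback added to getStoragePolicyID() can be restated as a standalone sketch (the class and method below are illustrative, not the actual INodeFile code; the constants and the suitability check are the ones used in the patch):

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    // Illustrative restatement of the effective-policy fallback above.
    class EffectivePolicySketch {
      static byte effectivePolicyId(byte inheritedId, boolean striped) {
        if (striped
            && inheritedId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
            && !ErasureCodingPolicyManager
                .checkStoragePolicySuitableForECStripedMode(inheritedId)) {
          // e.g. ONE_SSD inherited from the parent directory is not honored
          // for a striped file; the policy is reported as unspecified.
          return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
        }
        return inheritedId;
      }
    }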

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index befab80..f382243 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -526,6 +526,38 @@ public class TestMover {
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
           dataBlocks + parityBlocks);
 
+      // start 5 more datanodes
+      numOfDatanodes += 5;
+      capacities = new long[5][storagesPerDatanode];
+      for (int i = 0; i < 5; i++) {
+        for (int j = 0; j < storagesPerDatanode; j++) {
+          capacities[i][j] = capacity;
+        }
+      }
+      cluster.startDataNodes(conf, 5,
+          new StorageType[][] { { StorageType.SSD, StorageType.DISK },
+              { StorageType.SSD, StorageType.DISK },
+              { StorageType.SSD, StorageType.DISK },
+              { StorageType.SSD, StorageType.DISK },
+              { StorageType.SSD, StorageType.DISK } },
+          true, null, null, null, capacities, null, false, false, false, null);
+      cluster.triggerHeartbeats();
+
+      // set ONE_SSD on the dir; this policy is unsupported for striped files
+      client.setStoragePolicy(barDir, "ONE_SSD");
+
+      // run Mover
+      rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
+
+      // verify storage types and locations
+      // Block movements should have been skipped because ONE_SSD is not
+      // supported for striped files
+      locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.ARCHIVE, type);
+        }
+      }
     }finally{
       cluster.shutdown();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 0d15467..3703501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -27,21 +27,28 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-
+import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -287,4 +294,82 @@ public class TestStripedINodeFile {
       }
     }
   }
+
+  /**
+   * Tests that block placement for a newly created striped EC file ignores
+   * the storage policy when that policy is not suitable. The supported
+   * storage policies for EC striped mode are HOT, COLD and ALL_SSD; any
+   * other policy is ignored and the default policy is applied instead.
+   */
+  @Test(timeout = 60000)
+  public void testUnsuitableStoragePoliciesWithECStripedMode()
+      throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    int defaultStripedBlockSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE
+        * 4;
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
+
+    // start 10 datanodes
+    int numOfDatanodes = 10;
+    int storagesPerDatanode = 2;
+    long capacity = 10 * defaultStripedBlockSize;
+    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+    for (int i = 0; i < numOfDatanodes; i++) {
+      for (int j = 0; j < storagesPerDatanode; j++) {
+        capacities[i][j] = capacity;
+      }
+    }
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numOfDatanodes).storagesPerDatanode(storagesPerDatanode)
+        .storageTypes(
+            new StorageType[][] { { StorageType.SSD, StorageType.DISK },
+                { StorageType.SSD, StorageType.DISK },
+                { StorageType.SSD, StorageType.DISK },
+                { StorageType.SSD, StorageType.DISK },
+                { StorageType.SSD, StorageType.DISK },
+                { StorageType.DISK, StorageType.SSD },
+                { StorageType.DISK, StorageType.SSD },
+                { StorageType.DISK, StorageType.SSD },
+                { StorageType.DISK, StorageType.SSD },
+                { StorageType.DISK, StorageType.SSD } })
+        .storageCapacities(capacities).build();
+
+    try {
+      cluster.waitActive();
+
+      // set "/foo" directory with ONE_SSD storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(conf,
+          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+      String fooDir = "/foo";
+      client.mkdirs(fooDir, new FsPermission((short) 0777), true);
+      client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+      // set an EC policy on "/foo" (null selects the system default policy)
+      client.setErasureCodingPolicy(fooDir, null);
+
+      // write file to fooDir
+      final String barFile = "/foo/bar";
+      long fileLen = 20 * defaultStripedBlockSize;
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile),
+          fileLen, (short) 3, 0);
+
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0,
+          fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

