Posted to commits@hbase.apache.org by li...@apache.org on 2018/07/04 06:22:54 UTC

hbase git commit: HBASE-20691 Change the default WAL storage policy back to "NONE"

Repository: hbase
Updated Branches:
  refs/heads/master ee3990e42 -> ec8947f22


HBASE-20691 Change the default WAL storage policy back to "NONE"

This reverts commit 564c193d61cd1f92688a08a3af6d55ce4c4636d8 and adds more documentation
about why we chose "NONE" as the default.
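
For readers outside the thread, here is a minimal sketch (not part of this commit;
the class name and the "ONE_SSD" value are illustrative) of what the new default
means in practice: with "NONE", HBase never calls the HDFS setStoragePolicy API,
so WAL files simply inherit whatever policy HDFS itself is configured with, and an
operator who wants an HBase-managed policy can still set one explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalPolicyDefaultSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "NONE" (the new default) means: skip setStoragePolicy() and defer to HDFS.
    // To pin WALs to a specific policy again, set it explicitly (illustrative
    // value; requires a Hadoop version with HDFS-6584 support):
    conf.set("hbase.wal.storage.policy", "ONE_SSD");
    System.out.println(conf.get("hbase.wal.storage.policy"));
  }
}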


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec8947f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec8947f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec8947f2

Branch: refs/heads/master
Commit: ec8947f226d2ad6ab117811bcfa398df012a354d
Parents: ee3990e
Author: Yu Li <li...@apache.org>
Authored: Thu Jun 7 14:12:55 2018 +0800
Committer: Yu Li <li...@apache.org>
Committed: Wed Jul 4 13:43:48 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |  8 ++-
 .../apache/hadoop/hbase/util/CommonFSUtils.java | 71 +++++++++++---------
 .../procedure2/store/wal/WALProcedureStore.java |  5 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  5 +-
 .../apache/hadoop/hbase/util/TestFSUtils.java   | 57 +++++++++++++++-
 5 files changed, 105 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ec8947f2/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index b5a5d62..fa1a772 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1077,7 +1077,13 @@ public final class HConstants {
    * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
    * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
   public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+  /**
+   * "NONE" is not a valid storage policy and means we defer the policy to HDFS. @see
+   * <a href="https://issues.apache.org/jira/browse/HBASE-20691">HBASE-20691</a>
+   */
+  public static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";
+  /** By default we defer the WAL storage policy to HDFS */
+  public static final String DEFAULT_WAL_STORAGE_POLICY = DEFER_TO_HDFS_STORAGE_POLICY;
 
   /** Region in Transition metrics threshold time */
   public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =

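A quick sketch of the comparison the new constant enables (the helper name
shouldDeferToHdfs is hypothetical, not part of this change): since "NONE" is not
a real HDFS policy name, callers can test for it up front and skip the FileSystem
call entirely, leaving the HDFS-side default untouched.

import java.util.Locale;
import org.apache.hadoop.hbase.HConstants;

public class DeferToHdfsSketch {
  // Hypothetical helper illustrating the sentinel check; it mirrors the
  // normalization (trim + upper-case) done in CommonFSUtils below.
  static boolean shouldDeferToHdfs(String configuredPolicy) {
    return configuredPolicy != null && HConstants.DEFER_TO_HDFS_STORAGE_POLICY
        .equals(configuredPolicy.trim().toUpperCase(Locale.ROOT));
  }
}
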
http://git-wip-us.apache.org/repos/asf/hbase/blob/ec8947f2/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 5b46de9..76a3601 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -454,36 +454,6 @@ public abstract class CommonFSUtils {
         new Path(namespace)));
   }
 
-  /**
-   * Sets storage policy for given path according to config setting.
-   * If the passed path is a directory, we'll set the storage policy for all files
-   * created in the future in said directory. Note that this change in storage
-   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.
-   * If we're running on a FileSystem implementation that doesn't support the given storage policy
-   * (or storage policies at all), then we'll issue a log message and continue.
-   *
-   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
-   *
-   * @param fs We only do anything it implements a setStoragePolicy method
-   * @param conf used to look up storage policy with given key; not modified.
-   * @param path the Path whose storage policy is to be set
-   * @param policyKey Key to use pulling a policy from Configuration:
-   *   e.g. HConstants.WAL_STORAGE_POLICY (hbase.wal.storage.policy).
-   * @param defaultPolicy if the configured policy is equal to this policy name, we will skip
-   *   telling the FileSystem to set a storage policy.
-   */
-  public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
-      final Path path, final String policyKey, final String defaultPolicy) {
-    String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);
-    if (storagePolicy.equals(defaultPolicy)) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
-      }
-      return;
-    }
-    setStoragePolicy(fs, path, storagePolicy);
-  }
-
   // this mapping means that under a federated FileSystem implementation, we'll
   // only log the first failure from any of the underlying FileSystems at WARN and all others
   // will be at DEBUG.
@@ -508,33 +478,63 @@ public abstract class CommonFSUtils {
    */
   public static void setStoragePolicy(final FileSystem fs, final Path path,
       final String storagePolicy) {
+    try {
+      setStoragePolicy(fs, path, storagePolicy, false);
+    } catch (IOException e) {
+      // should never arrive here
+      LOG.warn("We have chosen not to throw exception but some unexpectedly thrown out", e);
+    }
+  }
+
+  static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,
+      boolean throwException) throws IOException {
     if (storagePolicy == null) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("We were passed a null storagePolicy, exiting early.");
       }
       return;
     }
-    final String trimmedStoragePolicy = storagePolicy.trim();
+    String trimmedStoragePolicy = storagePolicy.trim();
     if (trimmedStoragePolicy.isEmpty()) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("We were passed an empty storagePolicy, exiting early.");
       }
       return;
+    } else {
+      trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);
+    }
+    if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("We were passed the defer-to-hdfs policy {}, exiting early.",
+          trimmedStoragePolicy);
+      }
+      return;
+    }
+    try {
+      invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
+    } catch (IOException e) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Failed to invoke set storage policy API on FS", e);
+      }
+      if (throwException) {
+        throw e;
+      }
     }
-    invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
   }
 
   /*
    * All args have been checked and are good. Run the setStoragePolicy invocation.
    */
   private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
-      final String storagePolicy) {
+      final String storagePolicy) throws IOException {
     Method m = null;
+    Exception toThrow = null;
     try {
       m = fs.getClass().getDeclaredMethod("setStoragePolicy",
         new Class<?>[] { Path.class, String.class });
       m.setAccessible(true);
     } catch (NoSuchMethodException e) {
+      toThrow = e;
       final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584, HDFS-9345 " +
           "not available. This is normal and expected on earlier Hadoop versions.";
       if (!warningMap.containsKey(fs)) {
@@ -545,6 +545,7 @@ public abstract class CommonFSUtils {
       }
       m = null;
     } catch (SecurityException e) {
+      toThrow = e;
       final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +
           "HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +
           "to the user@hbase mailing list. Please be sure to include a link to your configs, and " +
@@ -565,6 +566,7 @@ public abstract class CommonFSUtils {
           LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
         }
       } catch (Exception e) {
+        toThrow = e;
         // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
         // misuse than a runtime problem with HDFS.
         if (!warningMap.containsKey(fs)) {
@@ -603,6 +605,9 @@ public abstract class CommonFSUtils {
         }
       }
     }
+    if (toThrow != null) {
+      throw new IOException(toThrow);
+    }
   }
 
   /**

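The reflective lookup above can be hard to follow inside a diff, so here is a
condensed standalone sketch (the helper name trySetStoragePolicy is hypothetical)
of the shape of invokeSetStoragePolicy and the reason for it: FileSystem#setStoragePolicy
only exists on Hadoop versions carrying HDFS-6584/HDFS-9345, so the method is
resolved at runtime and its absence is treated as a no-op rather than an error.

import java.io.IOException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReflectiveStoragePolicySketch {
  static void trySetStoragePolicy(FileSystem fs, Path path, String policy)
      throws IOException {
    try {
      // Look the method up on the concrete class, as CommonFSUtils does.
      Method m = fs.getClass().getDeclaredMethod("setStoragePolicy",
          Path.class, String.class);
      m.setAccessible(true);
      m.invoke(fs, path, policy);
    } catch (NoSuchMethodException e) {
      // Pre-HDFS-6584 Hadoop: the API does not exist; treat as a no-op.
    } catch (Exception e) {
      // e.g. InvocationTargetException wrapping an invalid-policy error.
      throw new IOException("Failed setting policy " + policy + " on " + path, e);
    }
  }
}
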
http://git-wip-us.apache.org/repos/asf/hbase/blob/ec8947f2/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 977921f..64bc712 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -216,8 +216,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
       }
     }
     // Now that it exists, set the log policy
-    CommonFSUtils.setStoragePolicy(fs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy);
 
     // Create archive dir up front. Rename won't work w/o it up on HDFS.
     if (this.walArchiveDir != null && !this.fs.exists(this.walArchiveDir)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ec8947f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 72ad8b8..2b45a04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -363,8 +363,9 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     }
     // Now that it exists, set the storage policy for the entire directory of wal files related to
     // this FSHLog instance
-    CommonFSUtils.setStoragePolicy(fs, conf, this.walDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    CommonFSUtils.setStoragePolicy(fs, this.walDir, storagePolicy);
     this.walFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
     this.prefixPathStr = new Path(walDir, walFilePrefix + WAL_FILE_NAME_DELIMITER).toString();
 

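Both call sites (WALProcedureStore above and AbstractFSWAL here) now follow the
same shape, since the overload that resolved the policy from Configuration was
removed. A hedged sketch of the pattern applied to an arbitrary directory (the
helper name and path are illustrative; note CommonFSUtils is an internal class):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class WalDirPolicySketch {
  static void applyWalPolicy(Configuration conf, FileSystem fs, Path dir) {
    // Resolve the policy at the call site, then hand it to the checker;
    // with the default "NONE" this is a no-op on the HDFS side.
    String storagePolicy =
        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
    CommonFSUtils.setStoragePolicy(fs, dir, storagePolicy);
  }
}
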
http://git-wip-us.apache.org/repos/asf/hbase/blob/ec8947f2/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index d5c920d..c5863f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -40,12 +40,15 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -338,19 +341,54 @@ public class TestFSUtils {
 
   @Test
   public void testSetStoragePolicyDefault() throws Exception {
+    verifyNoHDFSApiInvocationForDefaultPolicy();
     verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
   }
 
+  /**
+   * Note: currently the default policy is to defer to HDFS, and this case verifies that logic;
+   * the check will need to be removed if the default policy ever changes
+   */
+  private void verifyNoHDFSApiInvocationForDefaultPolicy() {
+    FileSystem testFs = new AlwaysFailSetStoragePolicyFileSystem();
+    // No exception should be thrown when setting the default storage policy, which indicates
+    // that the HDFS API hasn't been called
+    try {
+      FSUtils.setStoragePolicy(testFs, new Path("non-exist"), HConstants.DEFAULT_WAL_STORAGE_POLICY,
+        true);
+    } catch (IOException e) {
+      Assert.fail("Should have bypassed the FS API when setting default storage policy");
+    }
+    // An exception should be thrown when given a non-default storage policy, which indicates
+    // that the HDFS API has been called
+    try {
+      FSUtils.setStoragePolicy(testFs, new Path("non-exist"), "HOT", true);
+      Assert.fail("Should have invoked the FS API but haven't");
+    } catch (IOException e) {
+      // expected given an invalid path
+    }
+  }
+
+  class AlwaysFailSetStoragePolicyFileSystem extends DistributedFileSystem {
+    @Override
+    public void setStoragePolicy(final Path src, final String policyName)
+            throws IOException {
+      throw new IOException("The setStoragePolicy method is invoked");
+    }
+  }
+
   /* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */
   @Test
   public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
     verifyFileInDirWithStoragePolicy("ALL_SSD");
   }
 
+  final String INVALID_STORAGE_POLICY = "1772";
+
   /* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */
   @Test
   public void testSetStoragePolicyInvalid() throws Exception {
-    verifyFileInDirWithStoragePolicy("1772");
+    verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
   }
 
   // Here instead of TestCommonFSUtils because we need a minicluster
@@ -365,12 +403,25 @@ public class TestFSUtils {
       Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
       fs.mkdirs(testDir);
 
-      FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
-          HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      String storagePolicy =
+          conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      FSUtils.setStoragePolicy(fs, testDir, storagePolicy);
 
      String file = htu.getRandomUUID().toString();
       Path p = new Path(testDir, file);
       WriteDataToHDFS(fs, p, 4096);
+      HFileSystem hfs = new HFileSystem(fs);
+      String policySet = hfs.getStoragePolicyName(p);
+      LOG.debug("The storage policy of path " + p + " is " + policySet);
+      if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
+              || policy.equals(INVALID_STORAGE_POLICY)) {
+        String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
+        LOG.debug("The default hdfs storage policy (indicated by home path: "
+                + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
+        Assert.assertEquals(hdfsDefaultPolicy, policySet);
+      } else {
+        Assert.assertEquals(policy, policySet);
+      }
      // will assert existence before deleting.
       cleanupFile(fs, testDir);
     } finally {