Posted to commits@hbase.apache.org by bu...@apache.org on 2015/04/20 17:51:30 UTC
hbase git commit: HBASE-13498 Add more docs and a basic check for storage policy handling.
Repository: hbase
Updated Branches:
refs/heads/master 3bf69761e -> 702aea5b3
HBASE-13498 Add more docs and a basic check for storage policy handling.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/702aea5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/702aea5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/702aea5b
Branch: refs/heads/master
Commit: 702aea5b38ed6ad0942b0c59c3accca476b46873
Parents: 3bf6976
Author: Sean Busbey <bu...@apache.org>
Authored: Mon Apr 20 00:31:57 2015 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Mon Apr 20 10:45:14 2015 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/HConstants.java | 6 +-
.../hadoop/hbase/regionserver/wal/FSHLog.java | 2 +
.../org/apache/hadoop/hbase/util/FSUtils.java | 61 ++++++++++++++++----
.../apache/hadoop/hbase/util/TestFSUtils.java | 45 ++++++++++++++-
4 files changed, 98 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/702aea5b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index fc65c47..0b755d7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -957,9 +957,9 @@ public final class HConstants {
/** Configuration name of WAL storage policy
* Valid values are:
- * NONE: no preference in destination of replicas
- * ONE_SSD: place only one replica in SSD and the remaining in default storage
- * and ALL_SSD: place all replica on SSD
+ * NONE: no preference in destination of block replicas
+ * ONE_SSD: place only one block replica in SSD and the remaining in default storage
+ * and ALL_SSD: place all block replicas on SSD
*
* See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
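For reference, the policy named by this key can be set per deployment in hbase-site.xml or in code. A minimal sketch, not part of this patch; ONE_SSD is just one of the valid values documented above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    // Request ONE_SSD placement for new WAL blocks; equivalent to setting
    // hbase.wal.storage.policy to ONE_SSD in hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.WAL_STORAGE_POLICY, "ONE_SSD");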
http://git-wip-us.apache.org/repos/asf/hbase/blob/702aea5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 443134d..60e36f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -491,6 +491,8 @@ public class FSHLog implements WAL {
throw new IllegalArgumentException("wal suffix must start with '" + WAL_FILE_NAME_DELIMITER +
"' but instead was '" + suffix + "'");
}
+ // Now that it exists, set the storage policy for the entire directory of wal files related to
+ // this FSHLog instance
FSUtils.setStoragePolicy(fs, conf, this.fullPathLogDir, HConstants.WAL_STORAGE_POLICY,
HConstants.DEFAULT_WAL_STORAGE_POLICY);
this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
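The helper invoked above is not specific to FSHLog; it can be pointed at any directory. A hedged sketch of standalone use (the walDir path below is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.FSUtils;

    // Files created under walDir after this call inherit the configured policy;
    // the change is made at the HDFS level and outlives this process.
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path walDir = new Path("/hbase/WALs/example"); // hypothetical path
    FSUtils.setStoragePolicy(fs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);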
http://git-wip-us.apache.org/repos/asf/hbase/blob/702aea5b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index e86054b..3631292 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -45,6 +45,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -103,20 +104,34 @@ public abstract class FSUtils {
super();
}
- /*
- * Sets storage policy for given path according to config setting
- * @param fs
- * @param conf
+ /**
+ * Sets storage policy for given path according to config setting.
+ * If the passed path is a directory, we'll set the storage policy for all files
+ * created in the future in said directory. Note that this change in storage
+ * policy takes place at the HDFS level; it will persist beyond this RS's lifecycle.
+ * If we're running on a version of HDFS that doesn't support the given storage policy
+ * (or storage policies at all), then we'll issue a log message and continue.
+ *
+ * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
+ *
+ * @param fs we only do anything if this is an instance of DistributedFileSystem
+ * @param conf used to look up storage policy with given key; not modified.
* @param path the Path whose storage policy is to be set
- * @param policyKey
- * @param defaultPolicy
+ * @param policyKey e.g. HConstants.WAL_STORAGE_POLICY
+ * @param defaultPolicy usually should be the policy NONE to delegate to HDFS
*/
public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
final Path path, final String policyKey, final String defaultPolicy) {
String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase();
- if (!storagePolicy.equals(defaultPolicy) &&
- fs instanceof DistributedFileSystem) {
+ if (storagePolicy.equals(defaultPolicy)) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
+ }
+ return;
+ }
+ if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem)fs;
+ // Once our minimum supported Hadoop version is 2.6.0 we can remove reflection.
Class<? extends DistributedFileSystem> dfsClass = dfs.getClass();
Method m = null;
try {
@@ -125,10 +140,10 @@ public abstract class FSUtils {
m.setAccessible(true);
} catch (NoSuchMethodException e) {
LOG.info("FileSystem doesn't support"
- + " setStoragePolicy; --HDFS-7228 not available");
+ + " setStoragePolicy; --HDFS-6584 not available");
} catch (SecurityException e) {
LOG.info("Doesn't have access to setStoragePolicy on "
- + "FileSystems --HDFS-7228 not available", e);
+ + "FileSystems --HDFS-6584 not available", e);
m = null; // could happen on setAccessible()
}
if (m != null) {
@@ -136,9 +151,31 @@ public abstract class FSUtils {
m.invoke(dfs, path, storagePolicy);
LOG.info("set " + storagePolicy + " for " + path);
} catch (Exception e) {
- LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
+ // check for lack of HDFS-7228
+ boolean probablyBadPolicy = false;
+ if (e instanceof InvocationTargetException) {
+ final Throwable exception = e.getCause();
+ if (exception instanceof RemoteException &&
+ HadoopIllegalArgumentException.class.getName().equals(
+ ((RemoteException)exception).getClassName())) {
+ LOG.warn("Given storage policy, '" + storagePolicy + "', was rejected and probably " +
+ "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
+ "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
+ "more information see the 'ArchivalStorage' docs for your Hadoop release.");
+ LOG.debug("More information about the invalid storage policy.", exception);
+ probablyBadPolicy = true;
+ }
+ }
+ if (!probablyBadPolicy) {
+ // This swallows FNFE; should we be throwing it? It seems more likely to indicate
+ // dev misuse than a runtime problem with HDFS.
+ LOG.warn("Unable to set " + storagePolicy + " for " + path, e);
+ }
}
}
+ } else {
+ LOG.info("FileSystem isn't an instance of DistributedFileSystem; presuming it doesn't " +
+ "support setStoragePolicy.");
}
}
@@ -2060,4 +2097,4 @@ public abstract class FSUtils {
return null;
}
}
-}
\ No newline at end of file
+}
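As the "Once our minimum supported Hadoop version is 2.6.0" comment above notes, the reflection can eventually be replaced with a direct call to the HDFS-6584 API. A sketch of what that would look like (assumes Hadoop 2.6.0+ on the classpath; on older releases the method simply doesn't exist, which is what the NoSuchMethodException branch handles):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Direct, non-reflective equivalent of the guarded call in setStoragePolicy.
    static void setPolicyDirectly(FileSystem fs, Path path, String policy)
        throws IOException {
      if (fs instanceof DistributedFileSystem) {
        ((DistributedFileSystem) fs).setStoragePolicy(path, policy);
      }
    }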
http://git-wip-us.apache.org/repos/asf/hbase/blob/702aea5b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index e2c1488..173b0f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -345,6 +345,49 @@ public class TestFSUtils {
}
}
+ private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ Configuration conf = htu.getConfiguration();
+ conf.set(HConstants.WAL_STORAGE_POLICY, policy);
+
+ MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
+ try {
+ assertTrue(FSUtils.isHDFS(conf));
+
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
+ fs.mkdirs(testDir);
+
+ FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
+ HConstants.DEFAULT_WAL_STORAGE_POLICY);
+
+ String file = UUID.randomUUID().toString();
+ Path p = new Path(testDir, file);
+ WriteDataToHDFS(fs, p, 4096);
+ // will assert existence before deleting.
+ cleanupFile(fs, testDir);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testSetStoragePolicyDefault() throws Exception {
+ verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
+ }
+
+ /* might log a warning, but still works. (always warns on Hadoop < 2.6.0) */
+ @Test
+ public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
+ verifyFileInDirWithStoragePolicy("ALL_SSD");
+ }
+
+ /* should log a warning, but still works. (different warning on Hadoop < 2.6.0) */
+ @Test
+ public void testSetStoragePolicyInvalid() throws Exception {
+ verifyFileInDirWithStoragePolicy("1772");
+ }
+
/**
* Ugly test that ensures we can get at the hedged read counters in dfsclient.
* Does a bit of preading with hedged reads enabled using code taken from hdfs TestPread.
@@ -493,4 +536,4 @@ public class TestFSUtils {
assertTrue(fileSys.delete(name, true));
assertTrue(!fileSys.exists(name));
}
-}
\ No newline at end of file
+}