Posted to commits@hbase.apache.org by ja...@apache.org on 2020/01/19 17:06:43 UTC
[hbase] branch master updated: HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
This is an automated email from the ASF dual-hosted git repository.
janh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 50e2644 HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
50e2644 is described below
commit 50e264485d0f98aeb1bd0058ad3ed9d217840b7f
Author: Jan Hentschel <ja...@apache.org>
AuthorDate: Sun Jan 19 18:06:31 2020 +0100
HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
Signed-off-by: Sean Busbey <bu...@apache.org>
---
.../resources/hbase/checkstyle-suppressions.xml | 4 +
.../java/org/apache/hadoop/hbase/net/Address.java | 2 +-
.../apache/hadoop/hbase/util/ByteRangeUtils.java | 5 +-
.../apache/hadoop/hbase/util/CommonFSUtils.java | 152 +++++----------------
4 files changed, 43 insertions(+), 120 deletions(-)
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
index 0694b35..9351ecb 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -51,4 +51,8 @@
<suppress checks="EmptyBlock" files="org.apache.hadoop.hbase.TestTimeout"/>
<suppress checks="InnerAssignment" files="org.apache.hadoop.hbase.rest.PerformanceEvaluation"/>
<suppress checks="EmptyBlock" files="org.apache.hadoop.hbase.rest.PerformanceEvaluation"/>
+ <!-- Will not have a private constructor, because it is InterfaceAudience.Public -->
+ <suppress checks="HideUtilityClassConstructor" files="org.apache.hadoop.hbase.util.ByteRangeUtils"/>
+ <!-- Will not be final, because it is InterfaceAudience.Public -->
+ <suppress checks="FinalClass" files="org.apache.hadoop.hbase.net.Address"/>
</suppressions>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
index d76ef9f..48fa522 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java
@@ -31,7 +31,7 @@ import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
* We cannot have Guava classes in our API hence this Type.
*/
@InterfaceAudience.Public
-public final class Address implements Comparable<Address> {
+public class Address implements Comparable<Address> {
private HostAndPort hostAndPort;
private Address(HostAndPort hostAndPort) {
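The constructor above stays private, so dropping the final modifier does not open Address to outside subclassing; it simply restores the class-file shape that earlier releases shipped, which is why marking a Public-audience class final counts as a binary-incompatible change. A minimal usage sketch, assuming the fromParts factory that Address exposes elsewhere in this file (not shown in the hunk):

    import org.apache.hadoop.hbase.net.Address;

    public class AddressDemo {
      public static void main(String[] args) {
        // Instances come from static factories; the constructor is private.
        Address addr = Address.fromParts("example.org", 16010);
        System.out.println(addr); // prints the host:port form
      }
    }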
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
index fb0b336..9acfa26 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
@@ -30,10 +30,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
* Utility methods for working with {@link ByteRange}.
*/
@InterfaceAudience.Public
-public final class ByteRangeUtils {
- private ByteRangeUtils() {
- }
-
+public class ByteRangeUtils {
public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int rightInnerOffset) {
int maxCompares = Math.min(left.getLength(), right.getLength() - rightInnerOffset);
final byte[] lbytes = left.getBytes();
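Removing the explicit private constructor restores the implicit public no-arg constructor, the binary-compatible shape this Public-audience class had before; the checkstyle suppression added above exists for exactly this reason. The static methods are unaffected either way. A small sketch, assuming SimpleByteRange, a ByteRange implementation from the same package:

    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.ByteRangeUtils;
    import org.apache.hadoop.hbase.util.SimpleByteRange;

    public class PrefixDemo {
      public static void main(String[] args) {
        ByteRange left = new SimpleByteRange(new byte[] { 1, 2, 3 });
        ByteRange right = new SimpleByteRange(new byte[] { 1, 2, 9 });
        // The first two bytes match, so this prints 2.
        System.out.println(ByteRangeUtils.numEqualPrefixBytes(left, right, 0));
      }
    }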
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 89ad897..ea0cb2b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -149,69 +149,22 @@ public abstract class CommonFSUtils {
* Return the number of bytes that large input files should optimally
* be split into to minimize i/o time.
*
- * use reflection to search for getDefaultBlockSize(Path f)
- * if the method doesn't exist, fall back to using getDefaultBlockSize()
- *
* @param fs filesystem object
* @return the default block size for the path's filesystem
- * @throws IOException e
*/
- public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
- Method m = null;
- Class<? extends FileSystem> cls = fs.getClass();
- try {
- m = cls.getMethod("getDefaultBlockSize", Path.class);
- } catch (NoSuchMethodException e) {
- LOG.info("FileSystem doesn't support getDefaultBlockSize");
- } catch (SecurityException e) {
- LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
- m = null; // could happen on setAccessible()
- }
- if (m == null) {
- return fs.getDefaultBlockSize(path);
- } else {
- try {
- Object ret = m.invoke(fs, path);
- return ((Long)ret).longValue();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
+ public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
+ return fs.getDefaultBlockSize(path);
}
/*
* Get the default replication.
*
- * use reflection to search for getDefaultReplication(Path f)
- * if the method doesn't exist, fall back to using getDefaultReplication()
- *
* @param fs filesystem object
* @param f path of file
* @return default replication for the path's filesystem
- * @throws IOException e
*/
- public static short getDefaultReplication(final FileSystem fs, final Path path)
- throws IOException {
- Method m = null;
- Class<? extends FileSystem> cls = fs.getClass();
- try {
- m = cls.getMethod("getDefaultReplication", Path.class);
- } catch (NoSuchMethodException e) {
- LOG.info("FileSystem doesn't support getDefaultReplication");
- } catch (SecurityException e) {
- LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
- m = null; // could happen on setAccessible()
- }
- if (m == null) {
- return fs.getDefaultReplication(path);
- } else {
- try {
- Object ret = m.invoke(fs, path);
- return ((Number)ret).shortValue();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
+ public static short getDefaultReplication(final FileSystem fs, final Path path) {
+ return fs.getDefaultReplication(path);
}
/**
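With the reflection gone, both helpers reduce to a single direct FileSystem call and drop their throws IOException clauses, so callers no longer need to guard these two lookups. A minimal caller-side sketch (the path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.CommonFSUtils;

    public class FsDefaultsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/hbase/data");      // illustrative path
        FileSystem fs = path.getFileSystem(conf); // obtaining the FS can still throw
        // The helpers themselves no longer declare IOException.
        long blockSize = CommonFSUtils.getDefaultBlockSize(fs, path);
        short replication = CommonFSUtils.getDefaultReplication(fs, path);
        System.out.println(blockSize + " bytes, replication " + replication);
      }
    }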
@@ -568,83 +521,52 @@ public abstract class CommonFSUtils {
*/
private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
final String storagePolicy) throws IOException {
- Method m = null;
Exception toThrow = null;
+
try {
- m = fs.getClass().getDeclaredMethod("setStoragePolicy", Path.class, String.class);
- m.setAccessible(true);
- } catch (NoSuchMethodException e) {
- toThrow = e;
- final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584, HDFS-9345 " +
- "not available. This is normal and expected on earlier Hadoop versions.";
- if (!warningMap.containsKey(fs)) {
- warningMap.put(fs, true);
- LOG.warn(msg, e);
- } else if (LOG.isDebugEnabled()) {
- LOG.debug(msg, e);
+ fs.setStoragePolicy(path, storagePolicy);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
}
- m = null;
- } catch (SecurityException e) {
+ } catch (Exception e) {
toThrow = e;
- final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +
- "HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +
- "to the user@hbase mailing list. Please be sure to include a link to your configs, and " +
- "logs that include this message and period of time before it. Logs around service " +
- "start up will probably be useful as well.";
+ // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+ // misuse than a runtime problem with HDFS.
if (!warningMap.containsKey(fs)) {
warningMap.put(fs, true);
- LOG.warn(msg, e);
+ LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +
+ "DEBUG log level might have more details.", e);
} else if (LOG.isDebugEnabled()) {
- LOG.debug(msg, e);
+ LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
}
- m = null; // could happen on setAccessible() or getDeclaredMethod()
- }
- if (m != null) {
- try {
- m.invoke(fs, path, storagePolicy);
+
+ // check for lack of HDFS-7228
+ if (e instanceof RemoteException &&
+ HadoopIllegalArgumentException.class.getName().equals(
+ ((RemoteException)e).getClassName())) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
- }
- } catch (Exception e) {
- toThrow = e;
- // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
- // misuse than a runtime problem with HDFS.
- if (!warningMap.containsKey(fs)) {
- warningMap.put(fs, true);
- LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +
- "DEBUG log level might have more details.", e);
- } else if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
+ LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
+ "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
+ "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
+ "more information see the 'ArchivalStorage' docs for your Hadoop release.");
}
- // check for lack of HDFS-7228
- if (e instanceof InvocationTargetException) {
- final Throwable exception = e.getCause();
- if (exception instanceof RemoteException &&
- HadoopIllegalArgumentException.class.getName().equals(
- ((RemoteException)exception).getClassName())) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
- "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
- "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
- "more information see the 'ArchivalStorage' docs for your Hadoop release.");
- }
- // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
- // that throws UnsupportedOperationException
- } else if (exception instanceof UnsupportedOperationException) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("The underlying FileSystem implementation doesn't support " +
- "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " +
- "appears to be present in your version of Hadoop. For more information check " +
- "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " +
- "specification docs from HADOOP-11981, and/or related documentation from the " +
- "provider of the underlying FileSystem (its name should appear in the " +
- "stacktrace that accompanies this message). Note in particular that Hadoop's " +
- "local filesystem implementation doesn't support storage policies.", exception);
- }
- }
+ // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
+ // that throws UnsupportedOperationException
+ } else if (e instanceof UnsupportedOperationException) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The underlying FileSystem implementation doesn't support " +
+ "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " +
+ "appears to be present in your version of Hadoop. For more information check " +
+ "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " +
+ "specification docs from HADOOP-11981, and/or related documentation from the " +
+ "provider of the underlying FileSystem (its name should appear in the " +
+ "stacktrace that accompanies this message). Note in particular that Hadoop's " +
+ "local filesystem implementation doesn't support storage policies.", e);
}
}
}
+
if (toThrow != null) {
throw new IOException(toThrow);
}