Posted to commits@bigtop.apache.org by yw...@apache.org on 2018/02/03 04:13:43 UTC

[2/2] bigtop git commit: BIGTOP-2990: Upgrade Phoenix version to 4.13.1-HBase-1.3

BIGTOP-2990: Upgrade Phoenix version to 4.13.1-HBase-1.3

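Phoenix releases are built per HBase minor line, so moving to 4.13.1 also moves the
artifacts from the HBase-1.1 line to HBase-1.3, matching the HBase version in the BOM.
The bundled patch backports PHOENIX-4423 so that the phoenix-hive module compiles and
its integration tests run against the newer Hive that Bigtop ships: it adopts the
AbstractSerDe and SerializationUtilities APIs, builds the SemanticAnalyzer from a
QueryState, and falls back to reflection where Hive method signatures differ between
releases.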

Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/943ea913
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/943ea913
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/943ea913

Branch: refs/heads/master
Commit: 943ea913de5ab7b6cbc4772d12e4347cd914950f
Parents: eafac8e
Author: Youngwoo Kim <yw...@apache.org>
Authored: Sat Feb 3 12:53:09 2018 +0900
Committer: Youngwoo Kim <yw...@apache.org>
Committed: Sat Feb 3 12:53:09 2018 +0900

----------------------------------------------------------------------
 .../src/common/phoenix/patch1-PHOENIX-4423.diff | 433 +++++++++++++++++++
 bigtop.bom                                      |   4 +-
 2 files changed, 435 insertions(+), 2 deletions(-)
----------------------------------------------------------------------

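Files matching patch*.diff under bigtop-packages/src/common/phoenix/ are applied in
order to the unpacked Phoenix source tree by the packaging build, so the backport
travels with the packages rather than requiring a modified source tarball.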

http://git-wip-us.apache.org/repos/asf/bigtop/blob/943ea913/bigtop-packages/src/common/phoenix/patch1-PHOENIX-4423.diff
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/phoenix/patch1-PHOENIX-4423.diff b/bigtop-packages/src/common/phoenix/patch1-PHOENIX-4423.diff
new file mode 100644
index 0000000..ca76713
--- /dev/null
+++ b/bigtop-packages/src/common/phoenix/patch1-PHOENIX-4423.diff
@@ -0,0 +1,433 @@
+diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
+index f75009e32..cb28c76b4 100644
+--- a/phoenix-hive/pom.xml
++++ b/phoenix-hive/pom.xml
+@@ -141,6 +141,33 @@
+       <version>${mockito-all.version}</version>
+       <scope>test</scope>
+     </dependency>
++    <dependency>
++      <groupId>org.apache.calcite.avatica</groupId>
++      <artifactId>avatica</artifactId>
++      <!-- Overriding the version of Avatica that PQS uses so that Hive will work -->
++      <version>1.8.0</version>
++      <scope>test</scope>
++      <!-- And removing a bunch of dependencies that haven't been shaded in this older
++           Avatica version which conflict with HDFS -->
++      <exclusions>
++        <exclusion>
++          <groupId>org.hsqldb</groupId>
++          <artifactId>hsqldb</artifactId>
++        </exclusion>
++        <exclusion>
++          <groupId>com.fasterxml.jackson.core</groupId>
++          <artifactId>jackson-databind</artifactId>
++        </exclusion>
++        <exclusion>
++          <groupId>com.fasterxml.jackson.core</groupId>
++          <artifactId>jackson-annotations</artifactId>
++        </exclusion>
++        <exclusion>
++          <groupId>com.fasterxml.jackson.core</groupId>
++          <artifactId>jackson-core</artifactId>
++        </exclusion>
++      </exclusions>
++    </dependency>
+   </dependencies>
+ 
+   <build>
+diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
+index c705e2db7..3210409ce 100644
+--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
++++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/BaseHivePhoenixStoreIT.java
+@@ -84,7 +84,7 @@ public class BaseHivePhoenixStoreIT {
+         try {
+             qt = new HiveTestUtil(hiveOutputDir, hiveLogDir, clusterType, null);
+         } catch (Exception e) {
+-            LOG.error("Unexpected exception in setup", e);
++            LOG.error("Unexpected exception in setup: " + e.getMessage(), e);
+             fail("Unexpected exception in setup");
+         }
+ 
+@@ -143,14 +143,6 @@ public class BaseHivePhoenixStoreIT {
+ 
+     @AfterClass
+     public static void tearDownAfterClass() throws Exception {
+-        if (qt != null) {
+-            try {
+-                qt.shutdown();
+-            } catch (Exception e) {
+-                LOG.error("Unexpected exception in setup", e);
+-                fail("Unexpected exception in tearDown");
+-            }
+-        }
+         try {
+             conn.close();
+         } finally {
+@@ -164,5 +156,14 @@ public class BaseHivePhoenixStoreIT {
+                 }
+             }
+         }
++        // Shuts down the filesystem -- do this after stopping HBase.
++        if (qt != null) {
++            try {
++                qt.shutdown();
++            } catch (Exception e) {
++                LOG.error("Unexpected exception in tearDown", e);
++                fail("Unexpected exception in tearDown");
++            }
++        }
+     }
+ }
+diff --git a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
+index b4c4e4624..295e8b491 100644
+--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
++++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveTestUtil.java
+@@ -23,6 +23,8 @@ import org.apache.commons.io.FileUtils;
+ import org.apache.commons.io.IOUtils;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+@@ -36,8 +38,10 @@ import org.apache.hadoop.hive.common.io.SortPrintStream;
+ import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.api.Index;
++import org.apache.hadoop.hive.ql.QueryState;
+ import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+ import org.apache.hadoop.hive.ql.exec.Utilities;
++import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
+ import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
+ import org.apache.hadoop.hive.ql.metadata.Hive;
+ import org.apache.hadoop.hive.ql.metadata.Table;
+@@ -71,6 +75,10 @@ import java.io.OutputStream;
+ import java.io.OutputStreamWriter;
+ import java.io.PrintStream;
+ import java.io.StringWriter;
++import java.lang.reflect.Constructor;
++import java.lang.reflect.InvocationTargetException;
++import java.lang.reflect.Method;
++import java.lang.reflect.Modifier;
+ import java.net.URL;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+@@ -113,6 +121,7 @@ public class HiveTestUtil {
+     private ParseDriver pd;
+     protected Hive db;
+     protected HiveConf conf;
++    protected QueryState queryState;
+     private BaseSemanticAnalyzer sem;
+     protected final boolean overWrite;
+     private CliDriver cliDriver;
+@@ -120,6 +129,7 @@ public class HiveTestUtil {
+     private HadoopShims.MiniDFSShim dfs = null;
+     private String hadoopVer = null;
+     private HiveTestSetup setup = null;
++    private TezSessionState tezSessionState = null;
+     private boolean isSessionStateStarted = false;
+     private static final String javaVersion = getJavaVersion();
+ 
+@@ -224,7 +234,7 @@ public class HiveTestUtil {
+             // set fs.default.name to the uri of mini-dfs
+             String dfsUriString = WindowsPathUtil.getHdfsUriString(dfs.getFileSystem().getUri()
+                     .toString());
+-            conf.setVar(HiveConf.ConfVars.HADOOPFS, dfsUriString);
++            conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsUriString);
+             // hive.metastore.warehouse.dir needs to be set relative to the mini-dfs
+             conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
+                     (new Path(dfsUriString,
+@@ -270,10 +280,18 @@ public class HiveTestUtil {
+             LOG.info("Setting hive-site: " + HiveConf.getHiveSiteLocation());
+         }
+         conf = new HiveConf();
++        queryState = createQueryState(conf);
++        // Make sure QueryState didn't make a copy of our HiveConf
++        conf = queryState.getConf();
++        // Make sure YARN doesn't abort startup because of a near-full disk.
++        conf.setIfUnset(
++                "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
++                "99.0");
+         String tmpBaseDir = System.getProperty("test.tmp.dir");
+         if (tmpBaseDir == null || tmpBaseDir == "") {
+             tmpBaseDir = System.getProperty("java.io.tmpdir");
+         }
++        LOG.info("Writing metastore database to " + tmpBaseDir);
+         String metaStoreURL = "jdbc:derby:" + tmpBaseDir + File.separator + "metastore_dbtest;" +
+                 "create=true";
+         conf.set(ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
+@@ -307,11 +325,14 @@ public class HiveTestUtil {
+             String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
+             if (clusterType == MiniClusterType.tez) {
+                 conf.set("hive.execution.engine", "tez");
+-                mr = shims.getMiniTezCluster(conf, 1, uriString, 1);
++                mr = getMiniTezCluster(shims, conf, uriString);
+             } else {
+                 conf.set("hive.execution.engine", "mr");
+                 mr = shims.getMiniMrCluster(conf, 1, uriString, 1);
+-
++                conf.setInt("mapreduce.map.memory.mb", 512);
++                conf.set("mapreduce.map.java.opts", "-Xmx512m");
++                conf.setInt("mapreduce.reduce.memory.mb", 512);
++                conf.set("mapreduce.reduce.java.opts", "-Xmx512m");
+             }
+         }
+ 
+@@ -344,6 +365,60 @@ public class HiveTestUtil {
+         init();
+     }
+ 
++    private static HadoopShims.MiniMrShim getMiniTezCluster(HadoopShims shim, HiveConf conf, String uriString) {
++        // Hive <2.2
++        try {
++            Method m = shim.getClass().getMethod("getMiniTezCluster", Configuration.class, int.class, String.class, int.class);
++            // shims.getMiniTezCluster(conf, 1, uriString, 1);
++            return (HadoopShims.MiniMrShim) m.invoke(shim, conf, 1, uriString, 1);
++        } catch (Exception e) {
++            if (e instanceof RuntimeException) {
++                throw (RuntimeException) e;
++            }
++            // Pass
++        }
++        // Hive >=2.2
++        try {
++            Method m = shim.getClass().getMethod("getMiniTezCluster", Configuration.class, int.class, String.class, boolean.class);
++            // shims.getMiniTezCluster(conf, 1, uriString, false);
++            return (HadoopShims.MiniMrShim) m.invoke(shim, conf, 1, uriString, false);
++        } catch (Exception e) {
++            if (e instanceof RuntimeException) {
++                throw (RuntimeException) e;
++            }
++            throw new RuntimeException(e);
++        }
++    }
++
++    private static QueryState createQueryState(HiveConf conf) {
++        // Don't really care about caching the Class and Method objects as this is test code.
++        try {
++            Class<?> clz = Class.forName("org.apache.hadoop.hive.ql.QueryState");
++            // Hive <3.0: return new QueryState(conf);
++            try {
++                Constructor<?> cons = clz.getConstructor(HiveConf.class);
++                // But, this constructor also exists in Hive3 as private.
++                if (Modifier.isPublic(cons.getModifiers())) {
++                    return (QueryState) cons.newInstance(conf);
++                }
++                LOG.warn("Constructor was not public: " + cons);
++            } catch (NoSuchMethodException | SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
++                LOG.warn("Failed to invoke `new QueryState(HiveConf)` via reflection", e);
++            }
++
++            // Hive 3: return new QueryState.Builder().withHiveConf(conf).build();
++            clz = Class.forName("org.apache.hadoop.hive.ql.QueryState$Builder");
++            Object builder = clz.newInstance();
++            Method withHiveConfMethod = clz.getMethod("withHiveConf", HiveConf.class);
++            withHiveConfMethod.invoke(builder, conf);
++            Method buildMethod = clz.getMethod("build");
++            return (QueryState) buildMethod.invoke(builder);
++        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException | SecurityException | IllegalArgumentException | InvocationTargetException e) {
++            LOG.warn("Failed to invoke `new QueryState.Builder().withHiveConf(conf).build()` via reflection", e);
++            throw new RuntimeException(e);
++        }
++    }
++
+     public void shutdown() throws Exception {
+         cleanUp();
+         setup.tearDown();
+@@ -571,13 +646,15 @@ public class HiveTestUtil {
+     public void init() throws Exception {
+         testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+         conf.setBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD, false);
++        conf.setBoolean("datanucleus.schema.autoCreateTables", true);
+         String execEngine = conf.get("hive.execution.engine");
++        conf.setBoolean("hive.metastore.schema.verification", false);
+         conf.set("hive.execution.engine", "mr");
+         SessionState.start(conf);
+         conf.set("hive.execution.engine", execEngine);
+         db = Hive.get(conf);
+         pd = new ParseDriver();
+-        sem = new SemanticAnalyzer(conf);
++        sem = new SemanticAnalyzer(queryState);
+     }
+ 
+     public void init(String tname) throws Exception {
+@@ -598,7 +675,7 @@ public class HiveTestUtil {
+ 
+         HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
+                 "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator");
+-        Utilities.clearWorkMap();
++        Utilities.clearWorkMapForConf(conf);
+         CliSessionState ss = new CliSessionState(conf);
+         assert ss != null;
+         ss.in = System.in;
+@@ -627,6 +704,14 @@ public class HiveTestUtil {
+         ss.setIsSilent(true);
+         SessionState oldSs = SessionState.get();
+ 
++        if (oldSs != null && clusterType == MiniClusterType.tez) {
++          // Copy the tezSessionState from the old CliSessionState.
++          tezSessionState = oldSs.getTezSession();
++          oldSs.setTezSession(null);
++          ss.setTezSession(tezSessionState);
++          oldSs.close();
++        }
++
+         if (oldSs != null && clusterType == MiniClusterType.tez) {
+             oldSs.close();
+         }
+@@ -1019,7 +1104,7 @@ public class HiveTestUtil {
+ 
+     public void resetParser() throws SemanticException {
+         pd = new ParseDriver();
+-        sem = new SemanticAnalyzer(conf);
++        sem = new SemanticAnalyzer(queryState);
+     }
+ 
+     public TreeMap<String, String> getQMap() {
+diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
+index 4e9f46522..0f8ee93ac 100644
+--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
++++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/PhoenixStorageHandler.java
+@@ -35,8 +35,8 @@ import org.apache.hadoop.hive.ql.metadata.InputEstimator;
+ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+ import org.apache.hadoop.hive.ql.plan.TableDesc;
+ import org.apache.hadoop.hive.ql.session.SessionState;
++import org.apache.hadoop.hive.serde2.AbstractSerDe;
+ import org.apache.hadoop.hive.serde2.Deserializer;
+-import org.apache.hadoop.hive.serde2.SerDe;
+ import org.apache.hadoop.hive.shims.ShimLoader;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+@@ -242,7 +242,7 @@ public class PhoenixStorageHandler extends DefaultStorageHandler implements
+     }
+ 
+     @Override
+-    public Class<? extends SerDe> getSerDeClass() {
++    public Class<? extends AbstractSerDe> getSerDeClass() {
+         return PhoenixSerDe.class;
+     }
+ 
+diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
+index f0a5dd696..a8315e2a1 100644
+--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
++++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
+@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.client.Scan;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.RegionSizeCalculator;
+ import org.apache.hadoop.hive.conf.HiveConf;
+-import org.apache.hadoop.hive.ql.exec.Utilities;
++import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+ import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+ import org.apache.hadoop.hive.serde.serdeConstants;
+@@ -103,7 +103,7 @@ public class PhoenixInputFormat<T extends DBWritable> implements InputFormat<Wri
+             String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
+             if (filterExprSerialized != null) {
+                 ExprNodeGenericFuncDesc filterExpr =
+-                        Utilities.deserializeExpression(filterExprSerialized);
++                        SerializationUtilities.deserializeExpression(filterExprSerialized);
+                 PhoenixPredicateDecomposer predicateDecomposer =
+                         PhoenixPredicateDecomposer.create(Arrays.asList(jobConf.get(serdeConstants.LIST_COLUMNS).split(",")));
+                 predicateDecomposer.decomposePredicate(filterExpr);
+diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
+index 659983a71..4ba1d79d3 100644
+--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
++++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/ql/index/IndexPredicateAnalyzer.java
+@@ -344,18 +344,12 @@ public class IndexPredicateAnalyzer {
+             searchConditions, Object... nodeOutputs) throws SemanticException {
+ 
+         if (FunctionRegistry.isOpAnd(expr)) {
+-            assert (nodeOutputs.length == 2);
+-            ExprNodeDesc residual1 = (ExprNodeDesc) nodeOutputs[0];
+-            ExprNodeDesc residual2 = (ExprNodeDesc) nodeOutputs[1];
+-            if (residual1 == null) {
+-                return residual2;
+-            }
+-            if (residual2 == null) {
+-                return residual1;
+-            }
+             List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
+-            residuals.add(residual1);
+-            residuals.add(residual2);
++            for (Object obj : nodeOutputs) {
++                if (obj != null) {
++                    residuals.add((ExprNodeDesc) obj);
++                }
++            }
+             return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry
+                     .getGenericUDFForAnd(), residuals);
+         }
+diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
+index 19c26e55b..22f459858 100644
+--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
++++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixStorageHandlerUtil.java
+@@ -22,6 +22,8 @@ import com.google.common.collect.Maps;
+ import java.io.ByteArrayInputStream;
+ import java.io.IOException;
+ import java.lang.reflect.Array;
++import java.lang.reflect.InvocationTargetException;
++import java.lang.reflect.Method;
+ import java.math.BigDecimal;
+ import java.net.InetAddress;
+ import java.net.InetSocketAddress;
+@@ -35,8 +37,11 @@ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Properties;
++import java.util.concurrent.atomic.AtomicReference;
++
+ import javax.naming.NamingException;
+ import org.apache.commons.logging.Log;
++import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hbase.HRegionLocation;
+ import org.apache.hadoop.hbase.util.Strings;
+@@ -60,6 +65,9 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+  */
+ 
+ public class PhoenixStorageHandlerUtil {
++    private static final Log LOG = LogFactory.getLog(PhoenixStorageHandlerUtil.class);
++    private static final AtomicReference<Method> GET_BUCKET_METHOD_REF = new AtomicReference<>();
++    private static final AtomicReference<Method> GET_BUCKET_ID_METHOD_REF = new AtomicReference<>();
+ 
+     public static String getTargetTableName(Table table) {
+         Map<String, String> tableParameterMap = table.getParameters();
+@@ -268,7 +276,7 @@ public class PhoenixStorageHandlerUtil {
+     public static String getOptionsValue(Options options) {
+         StringBuilder content = new StringBuilder();
+ 
+-        int bucket = options.getBucket();
++        int bucket = getBucket(options);
+         String inspectorInfo = options.getInspector().getCategory() + ":" + options.getInspector()
+                 .getTypeName();
+         long maxTxnId = options.getMaximumTransactionId();
+@@ -285,4 +293,27 @@ public class PhoenixStorageHandlerUtil {
+ 
+         return content.toString();
+     }
++
++    private static int getBucket(Options options) {
++        Method getBucketMethod = GET_BUCKET_METHOD_REF.get();
++        try {
++            if (getBucketMethod == null) {
++                getBucketMethod = Options.class.getMethod("getBucket");
++                GET_BUCKET_METHOD_REF.set(getBucketMethod);
++            }
++            return (int) getBucketMethod.invoke(options);
++        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
++            LOG.trace("Failed to invoke Options.getBucket()", e);
++        }
++        Method getBucketIdMethod = GET_BUCKET_ID_METHOD_REF.get();
++        try {
++            if (getBucketIdMethod == null) {
++                getBucketIdMethod = Options.class.getMethod("getBucketId");
++                GET_BUCKET_ID_METHOD_REF.set(getBucketIdMethod);
++            }
++            return (int) getBucketIdMethod.invoke(options);
++        } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) {
++            throw new RuntimeException("Failed to invoke Options.getBucketId()", e);
++        }
++    }
+ }
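
Most of the Hive-version shimming in the patch above follows a single pattern: probe
via reflection for the method the running Hive actually provides, fall back to the
alternative signature, and cache the resolved Method where the call is repeated. A
minimal, self-contained sketch of that pattern (VersionShim and LegacyApi are
hypothetical stand-ins, not Phoenix or Hive classes):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.util.concurrent.atomic.AtomicReference;

    public class VersionShim {
        // Stand-in for a third-party class whose accessor was renamed
        // between releases (cf. Options.getBucket() vs. getBucketId()).
        public static class LegacyApi {
            public int getBucketId() { return 42; }
        }

        private static final AtomicReference<Method> BUCKET_METHOD =
                new AtomicReference<>();

        public static int bucketOf(LegacyApi target) {
            Method m = BUCKET_METHOD.get();
            try {
                if (m == null) {
                    try {
                        // Probe the old name first; if this classpath does
                        // not have it, fall through to the new one.
                        m = LegacyApi.class.getMethod("getBucket");
                    } catch (NoSuchMethodException e) {
                        m = LegacyApi.class.getMethod("getBucketId");
                    }
                    // Cache whichever Method actually resolved so later
                    // calls skip the lookup.
                    BUCKET_METHOD.set(m);
                }
                return (int) m.invoke(target);
            } catch (NoSuchMethodException | IllegalAccessException
                    | InvocationTargetException e) {
                throw new RuntimeException(
                        "No usable bucket accessor on this classpath", e);
            }
        }

        public static void main(String[] args) {
            System.out.println(bucketOf(new LegacyApi())); // prints 42
        }
    }

The same probe-and-fall-back shape appears above in getMiniTezCluster() (int vs.
boolean final parameter across Hive shim versions) and createQueryState() (public
HiveConf constructor on Hive <3, Builder on Hive 3); getBucket() additionally caches
the resolved Method in an AtomicReference since the lookup would otherwise repeat on
every call.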

http://git-wip-us.apache.org/repos/asf/bigtop/blob/943ea913/bigtop.bom
----------------------------------------------------------------------
diff --git a/bigtop.bom b/bigtop.bom
index 1c129b1..1cf7f0a 100644
--- a/bigtop.bom
+++ b/bigtop.bom
@@ -347,8 +347,8 @@ bigtop {
        * to the base HBase version. Update as needed whenever changing the
        * HBase version in the BOM.
        */
-      phoenix.hbase ='HBase-1.1'
-      version { base = "4.9.0-${phoenix.hbase}"; pkg = '4.9.0'; release = 1 }
+      phoenix.hbase ='HBase-1.3'
+      version { base = "4.13.1-${phoenix.hbase}"; pkg = '4.13.1'; release = 1 }
       tarball { destination = "$name-${version.base}-src.tar.gz"
                 source      = "apache-$name-${version.base}-src.tar.gz" }
       url     { download_path = "/$name/apache-$name-${version.base}/src"
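
With those values, and assuming $name resolves to "phoenix" in this section of the
BOM, the entry expands to roughly:

    base          = "4.13.1-HBase-1.3"   (version the source tree builds as)
    pkg           = "4.13.1"             (package version, HBase qualifier dropped)
    tarball       = "apache-phoenix-4.13.1-HBase-1.3-src.tar.gz"
    download path = "/phoenix/apache-phoenix-4.13.1-HBase-1.3/src"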