Posted to commits@phoenix.apache.org by sa...@apache.org on 2016/11/18 07:35:28 UTC

phoenix git commit: PHOENIX-3497 Provide a work around for HBASE-17122

Repository: phoenix
Updated Branches:
  refs/heads/4.8-HBase-1.2 16795aef6 -> cd48969c9


PHOENIX-3497 Provide a work around for HBASE-17122


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd48969c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd48969c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd48969c

Branch: refs/heads/4.8-HBase-1.2
Commit: cd48969c9ef6784f6448c884981e78519eebf1dd
Parents: 16795ae
Author: Samarth <sa...@salesforce.com>
Authored: Thu Nov 17 23:35:11 2016 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Nov 17 23:35:11 2016 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    | 24 ++++++++++++++++++++
 .../phoenix/iterate/BaseResultIterators.java    | 16 +++++++++++++
 .../org/apache/phoenix/util/ServerUtil.java     |  5 ++++
 3 files changed, 45 insertions(+)
----------------------------------------------------------------------
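In short: because of HBASE-17122, HBase 0.98.21 and later report a scan against a disabled table as a NotServingRegionException, which Phoenix translates into a StaleRegionBoundaryCacheException and retries. This patch adds an explicit isTableDisabled() check so such queries fail fast with a TableNotEnabledException cause. A minimal client-side sketch of the behavior the new test asserts, assuming a reachable Phoenix JDBC URL and a table that has already been disabled via the HBase admin (the URL and table name below are illustrative, not part of the patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    import org.apache.hadoop.hbase.TableNotEnabledException;
    import org.apache.phoenix.exception.PhoenixIOException;

    public class DisabledTableQueryExample {
        public static void main(String[] args) throws Exception {
            String url = "jdbc:phoenix:localhost";   // illustrative
            String table = "SOME_DISABLED_TABLE";    // illustrative, assumed already disabled
            try (Connection conn = DriverManager.getConnection(url);
                 ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + table)) {
                rs.next(); // expected to fail rather than return rows
            } catch (PhoenixIOException e) {
                // With this patch the root cause is a TableNotEnabledException
                // instead of an endless stale-region-boundary retry.
                System.out.println(e.getCause() instanceof TableNotEnabledException);
            }
        }
    }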


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd48969c/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index e09dcea..7b3fc47 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -42,6 +42,7 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -2216,5 +2218,27 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
 		}
 	}
 	
+	@Test
+    public void testQueryingDisabledTable() throws Exception {
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            String tableName = generateRandomString();
+            conn.createStatement().execute(
+                    "CREATE TABLE " + tableName
+                    + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) ");
+            try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+                admin.disableTable(Bytes.toBytes(tableName));
+            }
+            String query = "SELECT * FROM " + tableName + " WHERE 1=1";
+            try (Connection conn2 = DriverManager.getConnection(getUrl())) {
+                try (ResultSet rs = conn2.createStatement().executeQuery(query)) {
+                    assertFalse(rs.next());
+                    fail();
+                } catch (PhoenixIOException ioe) {
+                    assertTrue(ioe.getCause() instanceof TableNotEnabledException);
+                }
+            }
+        }
+    }
+	
 }
  

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd48969c/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 581e0cd..3bb6463 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -52,6 +52,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.PageFilter;
@@ -777,6 +779,20 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                         try { // Rethrow as SQLException
                             throw ServerUtil.parseServerException(e);
                         } catch (StaleRegionBoundaryCacheException e2) {
+                           /*
+                            * A StaleRegionBoundaryCacheException can be thrown in several scenarios, including splits,
+                            * region moves, and a disabled table. See ServerUtil.parseServerException() for details.
+                            * Because of HBASE-17122 we need to explicitly check whether this exception was thrown
+                            * because the table was disabled or because a split happened. This obviously is a HACK.
+                            * With older versions of HBase, a TableNotEnabledException was correctly thrown instead, so
+                            * this kind of hackery wasn't needed.
+                            * TODO: remove this once HBASE-17122 is fixed.
+                            */
+                            try (HBaseAdmin admin = context.getConnection().getQueryServices().getAdmin()) {
+                                if (admin.isTableDisabled(physicalTableName)) {
+                                    throw new TableNotEnabledException(physicalTableName);
+                                }
+                            }
                             scanPairItr.remove();
                             // Catch only to try to recover from region boundary cache being out of date
                             if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries

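The hunk above is the heart of the workaround: when a scan error parses as a StaleRegionBoundaryCacheException, the iterators now ask the HBase admin whether the table is in fact disabled before treating the failure as a stale region boundary and retrying. A standalone sketch of that check, assuming you already hold an HBaseAdmin and the physical table name as bytes (the class and method names below are illustrative, not from the patch):

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableNotEnabledException;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    final class DisabledTableGuard {
        /*
         * Rethrow as TableNotEnabledException when the table is disabled, so callers
         * do not mistake the failure for a stale region boundary cache and retry.
         * Illustrative only; the patch does this inline in BaseResultIterators using
         * the Phoenix connection's query services to obtain the admin.
         */
        static void failFastIfDisabled(HBaseAdmin admin, byte[] physicalTableName) throws IOException {
            if (admin.isTableDisabled(physicalTableName)) {
                throw new TableNotEnabledException(physicalTableName);
            }
        }
    }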
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd48969c/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index a3940fc..024ab90 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -115,6 +115,11 @@ public class ServerUtil {
     
     public static SQLException parseServerExceptionOrNull(Throwable t) {
         while (t.getCause() != null) {
+            /*
+             * Note that a NotServingRegionException can be thrown in several scenarios, including splits, region
+             * moves, and a disabled table. This is a hack meant to address the buggy behavior introduced in HBase
+             * 0.98.21 and beyond. See HBASE-17122 for details.
+             */
             if (t instanceof NotServingRegionException) {
                 return parseRemoteException(new StaleRegionBoundaryCacheException());
             }