Posted to commits@phoenix.apache.org by ch...@apache.org on 2020/03/23 23:06:10 UTC

[phoenix] branch master updated: PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp

This is an automated email from the ASF dual-hosted git repository.

chinmayskulkarni pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 3c0002c  PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp
3c0002c is described below

commit 3c0002c51d9ac047816f3ad0cfbb5188c2fafa9a
Author: Sandeep Guggilam <sg...@sandeepg-ltm.internal.salesforce.com>
AuthorDate: Mon Mar 23 15:47:03 2020 -0700

    PHOENIX-5718 GetTable builds a table excluding the given clientTimeStamp
    
    Signed-off-by: Chinmay Kulkarni <ch...@apache.org>
---
 .../phoenix/end2end/MetaDataEndpointImplIT.java    | 32 +++++++++++++++++++++-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 22 +++++++--------
 2 files changed, 42 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
index 6724da9..eeac2e9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -15,11 +15,13 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TableViewFinderResult;
 import org.apache.phoenix.util.ViewUtil;
 import org.junit.Test;
@@ -205,6 +207,34 @@ public class MetaDataEndpointImplIT extends ParallelStatsDisabledIT {
         assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, childView.toUpperCase()), "A", "B", "D");
 
     }
+
+    @Test
+    public void testUpdateCacheWithAlteringColumns() throws Exception {
+        String tableName = generateUniqueName();
+        try (PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(
+                PhoenixConnection.class)) {
+            String ddlFormat =
+                    "CREATE TABLE IF NOT EXISTS " + tableName + "  (" + " PK2 INTEGER NOT NULL, "
+                            + "V1 INTEGER, V2 INTEGER "
+                            + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+            conn.createStatement().execute(ddlFormat);
+            conn.createStatement().execute("ALTER TABLE " + tableName + " ADD V3 integer");
+            PTable table = PhoenixRuntime.getTable(conn, tableName.toUpperCase());
+            assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+
+            // Set the SCN to the timestamp at which the V3 column was added
+            Properties props = PropertiesUtil.deepCopy(conn.getClientInfo());
+            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(table.getTimeStamp()));
+
+            try (PhoenixConnection metaConnection = new PhoenixConnection(conn,
+                    conn.getQueryServices(), props)) {
+                // Force a cache update and check that V3 is present in the returned table
+                table = PhoenixRuntime.getTableNoCache(metaConnection, tableName.toUpperCase());
+                assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+            }
+        }
+    }
+
 
     @Test
     public void testDroppingAColumn() throws Exception {
@@ -370,4 +400,4 @@ public class MetaDataEndpointImplIT extends ParallelStatsDisabledIT {
         assertEquals(expected, actual);
     }
 
-}
\ No newline at end of file
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 860e2d3..f371f98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -18,7 +18,6 @@
 package org.apache.phoenix.coprocessor;
 
 import static org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow;
-import static org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG;
 import static org.apache.phoenix.coprocessor.generated.MetaDataProtos.MutationCode.UNABLE_TO_CREATE_CHILD_LINK;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE_BYTES;
@@ -49,6 +48,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VIEW_TTL_HWM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS_BYTES;
@@ -71,11 +71,10 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARA
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_HWM_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TTL_NOT_DEFINED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VIEW_TTL_HWM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
 import static org.apache.phoenix.query.QueryConstants.VIEW_MODIFIED_PROPERTY_TAG_TYPE;
 import static org.apache.phoenix.schema.PTableType.INDEX;
@@ -99,13 +98,6 @@ import java.util.NavigableMap;
 import java.util.Properties;
 import java.util.Set;
 
-import com.google.common.cache.Cache;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
@@ -247,6 +239,14 @@ import org.apache.phoenix.util.ViewUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.cache.Cache;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
 /**
  * Endpoint co-processor through which all Phoenix metadata mutations flow.
  * Phoenix metadata is stored in SYSTEM.CATALOG. The table specific information
@@ -2913,7 +2913,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
             // Query for the latest table first, since it's not cached
             table =
                     buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion);
-            if ((table != null && table.getTimeStamp() < clientTimeStamp) ||
+            if ((table != null && table.getTimeStamp() <= clientTimeStamp) ||
                     (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) {
                 return table;
             }
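
The substance of the fix is the final hunk: the server-side timestamp bound for the freshly built table becomes inclusive, so a client whose CURRENT_SCN equals the timestamp of the latest metadata change (here, the ALTER TABLE that added V3) receives that version instead of having it filtered out. Below is a minimal sketch of just that boundary semantics; the class and method names are hypothetical and not part of the Phoenix API.

    // Hypothetical illustration of the PHOENIX-5718 boundary fix; not Phoenix code.
    public class ClientTimestampBoundary {

        // Before the fix the server used a strict comparison (ts < clientTimeStamp),
        // so a table version written exactly at the client's SCN was excluded.
        static boolean isVisibleAt(long tableTimeStamp, long clientTimeStamp) {
            return tableTimeStamp <= clientTimeStamp; // was '<' before this commit
        }

        public static void main(String[] args) {
            long alterTs = 1000L; // assumed timestamp of the ALTER TABLE ... ADD V3
            // A connection with CURRENT_SCN == alterTs now sees the V3 column:
            System.out.println(isVisibleAt(alterTs, alterTs)); // true; false before the fix
        }
    }

This matches the new test above: it sets CURRENT_SCN to table.getTimeStamp() after the ALTER and asserts that a forced cache refresh still returns all four columns, including V3.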