Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/12/11 03:44:21 UTC

[43/52] [abbrv] phoenix git commit: PHOENIX-2490 Exception while running update statistics on transactional table

PHOENIX-2490 Exception while running update statistics on transactional table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ce3b580
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ce3b580
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ce3b580

Branch: refs/heads/calcite
Commit: 8ce3b580fb14f23a49b3e2c47ab2628e140ae2da
Parents: 93acc27
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Fri Dec 4 13:48:48 2015 -0800
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Mon Dec 7 12:22:13 2015 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/StatsCollectorIT.java       | 33 ++++++++++++++------
 .../apache/phoenix/compile/PostDDLCompiler.java |  3 +-
 2 files changed, 25 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ce3b580/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 6c392f5..23859d6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -59,27 +59,40 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
         props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
         props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
         props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024));
+        props.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.toString(true));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
 
     @Test
-    public void testUpdateStatsForTheTable() throws Throwable {
-        Connection conn;
+    public void testUpdateStatsForNonTxnTable() throws Throwable {
+        helpTestUpdateStats(false);
+    }
+    
+    @Test
+    public void testUpdateStatsForTxnTable() throws Throwable {
+        helpTestUpdateStats(true);
+    }
+
+	private void helpTestUpdateStats(boolean transactional) throws SQLException, IOException,
+			InterruptedException {
+		Connection conn;
         PreparedStatement stmt;
         ResultSet rs;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String tableName = "T" + (transactional ? "_TXN" : "");
         // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(
-                "CREATE TABLE t ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
-                        + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC)) \n");
+                "CREATE TABLE " + tableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+                        + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))" 
+                		+ (transactional ? " TRANSACTIONAL=true" : ""));
         String[] s;
         Array array;
-        conn = upsertValues(props, "t");
+        conn = upsertValues(props, tableName);
         // CAll the update statistics query here. If already major compaction has run this will not get executed.
-        stmt = conn.prepareStatement("UPDATE STATISTICS T");
+        stmt = conn.prepareStatement("UPDATE STATISTICS " + tableName);
         stmt.execute();
-        stmt = upsertStmt(conn, "t");
+        stmt = upsertStmt(conn, tableName);
         stmt.setString(1, "z");
         s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
         array = conn.createArrayOf("VARCHAR", s);
@@ -91,12 +104,12 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
         conn.close();
         conn = DriverManager.getConnection(getUrl(), props);
         // This analyze would not work
-        stmt = conn.prepareStatement("UPDATE STATISTICS T");
+        stmt = conn.prepareStatement("UPDATE STATISTICS " + tableName);
         stmt.execute();
-        rs = conn.createStatement().executeQuery("SELECT k FROM T");
+        rs = conn.createStatement().executeQuery("SELECT k FROM " + tableName);
         assertTrue(rs.next());
         conn.close();
-    }
+	}
 
     @Test
     public void testUpdateStatsWithMultipleTables() throws Throwable {
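
The hunk above turns the single statistics test into a shared helper driven by two tests, so UPDATE STATISTICS is exercised against both a plain table and one created with TRANSACTIONAL=true (with QueryServices.TRANSACTIONS_ENABLED switched on in the test driver). A rough standalone JDBC sketch of the transactional scenario follows; the connection URL and table name are placeholders, not taken from the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UpdateStatsOnTxnTableExample {
        public static void main(String[] args) throws Exception {
            // "jdbc:phoenix:localhost" and T_TXN are placeholders; transactions
            // must be enabled on the client and server (mirroring the
            // QueryServices.TRANSACTIONS_ENABLED property the test adds).
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS T_TXN ("
                        + "k VARCHAR PRIMARY KEY, v VARCHAR) TRANSACTIONAL=true");
                // Before this fix, running UPDATE STATISTICS against a
                // transactional table could fail with the exception reported
                // in PHOENIX-2490.
                stmt.execute("UPDATE STATISTICS T_TXN");
            }
        }
    }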

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ce3b580/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
index 506623b..192a031 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
@@ -21,6 +21,7 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
@@ -184,7 +185,7 @@ public class PostDDLCompiler {
                         // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp.
                         // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid transactions
                         // in this case, so maybe this is ok.
-                        if (tableRef.getTable().isTransactional()) {
+                        if (ts!=HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                             ts = TransactionUtil.convertToNanoseconds(ts);
                         }
                         ScanUtil.setTimeRange(scan, ts);
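
The guard added above skips TransactionUtil.convertToNanoseconds when the scan timestamp is still HConstants.LATEST_TIMESTAMP (Long.MAX_VALUE in HBase). A minimal standalone sketch of why that matters, under the assumption that the conversion scales a millisecond timestamp by a large per-millisecond factor; the class name and constant below are illustrative, not Phoenix code:

    public class LatestTimestampOverflow {
        // HConstants.LATEST_TIMESTAMP in HBase is Long.MAX_VALUE.
        static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
        // Assumed scale factor: the nanosecond conversion multiplies a
        // millisecond timestamp by a per-millisecond factor of this order.
        static final long ASSUMED_TX_PER_MS = 1_000_000L;

        public static void main(String[] args) {
            long ts = LATEST_TIMESTAMP;
            long converted = ts * ASSUMED_TX_PER_MS; // silently overflows
            System.out.println(converted); // a negative, meaningless timestamp
            // Checking ts != LATEST_TIMESTAMP first, as the patch does, leaves
            // the "latest" sentinel untouched instead of feeding an invalid
            // value to ScanUtil.setTimeRange.
        }
    }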