You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by td...@apache.org on 2017/05/10 18:04:00 UTC

[01/46] phoenix git commit: Initial version of transaction abstraction layer

Repository: phoenix
Updated Branches:
  refs/heads/omid 6b16183ed -> 476d13d53


Initial version of transaction abstraction layer


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7c2ca8c5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7c2ca8c5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7c2ca8c5

Branch: refs/heads/omid
Commit: 7c2ca8c51a5c551c6027e70bd2ad218e6743f3d6
Parents: 3519b3b
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Thu Feb 9 10:45:07 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Thu Feb 9 10:45:07 2017 +0200

----------------------------------------------------------------------
 .../transaction/OmidTransactionContext.java     |  95 +++++++++++++
 .../transaction/OmidTransactionTable.java       | 139 +++++++++++++++++++
 .../transaction/PhoenixTransactionContext.java  | 106 ++++++++++++++
 .../transaction/PhoenixTransactionalTable.java  | 138 ++++++++++++++++++
 .../transaction/TephraTransactionContext.java   |  95 +++++++++++++
 .../transaction/TephraTransactionTable.java     | 139 +++++++++++++++++++
 6 files changed, 712 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
new file mode 100644
index 0000000..5f0bdce
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -0,0 +1,95 @@
+package org.apache.phoenix.transaction;
+
+import java.sql.SQLException;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.phoenix.schema.PTable;
+
+public class OmidTransactionContext implements PhoenixTransactionContext {
+
+    @Override
+    public void begin() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void commit() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void abort() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void abort(SQLException e) throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void checkpoint() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void commitDDL(PTable dataTable) throws SQLException,
+            InterruptedException, TimeoutException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void markDML(PTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void join(PhoenixTransactionContext ctx) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void addTransactionTable(PhoenixTransactionalTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void addTransactionToTable(PhoenixTransactionalTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public boolean isTransactionRunning() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public void reset() {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public long getTransactionId() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public long getReadPointer() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
new file mode 100644
index 0000000..f5cdd17
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.transaction;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+
+public class OmidTransactionTable implements PhoenixTransactionalTable {
+
+    @Override
+    public Result get(Get get) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void put(Put put) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void delete(Delete delete) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public ResultScanner getScanner(Scan scan) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public byte[] getTableName() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public HTableDescriptor getTableDescriptor() throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean exists(Get get) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public Result[] get(List<Get> gets) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public ResultScanner getScanner(byte[] family) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public ResultScanner getScanner(byte[] family, byte[] qualifier)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void put(List<Put> puts) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void delete(List<Delete> deletes) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public HTableInterface getHTable() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void setAutoFlush(boolean autoFlush) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public boolean isAutoFlush() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public long getWriteBufferSize() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public void setWriteBufferSize(long writeBufferSize) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void flushCommits() throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void close() throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
new file mode 100644
index 0000000..b391144
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -0,0 +1,106 @@
+package org.apache.phoenix.transaction;
+
+import org.apache.phoenix.schema.PTable;
+
+import java.sql.SQLException;
+import java.util.concurrent.TimeoutException;
+
+public interface PhoenixTransactionContext {
+
+    /**
+     * Starts a transaction
+     * 
+     * @throws SQLException
+     */
+    public void begin() throws SQLException;
+    
+    /**
+     * Commits a transaction
+     * 
+     * @throws SQLException
+     */
+    public void commit() throws SQLException;
+    
+    /**
+     * Rollback a transaction
+     * 
+     * @throws SQLException
+     */
+    public void abort() throws SQLException;
+
+    /**
+     * Rollback a transaction
+     * 
+     * @param e  
+     * @throws SQLException
+     */
+    public void abort(SQLException e) throws SQLException;
+    
+    /**
+     * Create a checkpoint in a transaction as defined in [TEPHRA-96]
+     * @throws SQLException
+     */
+    public void checkpoint() throws SQLException;
+    
+    /**
+     * Commit DDL to guarantee that no transaction started before create index 
+     * and committed afterwards, as explained in [PHOENIX-2478], [TEPHRA-157] and [OMID-56].
+     * 
+     * @param dataTable  the table that the DDL command works on
+     * @throws SQLException
+     * @throws InterruptedException
+     * @throws TimeoutException
+     */
+    public void commitDDL(PTable dataTable)
+            throws SQLException, InterruptedException, TimeoutException;
+    
+    /**
+     * mark DML with table information for conflict detection of concurrent 
+     * DDL operation, as explained in [PHOENIX-2478], [TEPHRA-157] and [OMID-56].
+     * 
+     * @param table  the table that the DML command works on
+     */
+    public void markDML(PTable table);
+    
+    /**
+     * Augment the current context with ctx modified keys
+     * 
+     * @param ctx
+     */
+    public void join(PhoenixTransactionContext ctx);
+    
+    /** 
+     * Add transaction table to the context.
+     * Will be mostly used by Tephra, since Omid keeps the changeset inside the transaction while 
+     * Tephra keeps it distributed at the different awares.
+     * 
+     * @param table
+     */
+    public void addTransactionTable(PhoenixTransactionalTable table);
+    
+    /**
+     * Add transaction to the table. 
+     * @param table
+     */
+    public void addTransactionToTable(PhoenixTransactionalTable table);
+    
+    /**
+     * Is there a transaction in flight?
+     */
+    public boolean isTransactionRunning();
+
+    /**
+     * Reset transaction state
+     */
+    public void reset();
+    
+    /** 
+     * Returns transaction unique identifier
+     */
+    long getTransactionId();
+    
+    /**
+     * Returns transaction snapshot id
+     */
+    long getReadPointer();
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
new file mode 100644
index 0000000..2316dc4
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -0,0 +1,138 @@
+package org.apache.phoenix.transaction;
+
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+
+import java.io.IOException;
+import java.util.List;
+
+public interface PhoenixTransactionalTable {
+
+    /**
+     * Transactional version of {@link HTableInterface#get(Get get)}
+     * @param get
+     * @return 
+     * @throws IOException
+     */
+    public Result get(Get get) throws IOException;
+
+    /**
+     * Transactional version of {@link HTableInterface#put(Put put)}
+     * @param put
+     * @throws IOException
+     */
+    public void put(Put put) throws IOException;
+
+    /**
+     * Transactional version of {@link HTableInterface#delete(Delete delete)}
+     *
+     * @param delete 
+     * @throws IOException 
+     */
+    public void delete(Delete delete) throws IOException;
+    
+    /**
+     * Transactional version of {@link HTableInterface#getScanner(Scan scan)}
+     *
+     * @param scan 
+     * @return ResultScanner
+     * @throws IOException
+     */
+    public ResultScanner getScanner(Scan scan) throws IOException;
+    
+    /**
+     * Returns Htable name
+     */
+    public byte[] getTableName();
+    
+    /**
+     * Returns Htable configuration object
+     */
+    public Configuration getConfiguration();
+
+    /**
+     * Returns HTableDescriptor of Htable
+     * @throws IOException 
+     */
+    public HTableDescriptor getTableDescriptor() throws IOException;
+
+    /**
+     * Checks if cell exists
+     * @throws IOException
+     */
+    public boolean exists(Get get) throws IOException;
+    
+    /**
+     * Transactional version of {@link HTableInterface#get(List gets)}
+     * @throws IOException 
+     */
+    public Result[] get(List<Get> gets) throws IOException;
+    
+    /**
+     * Transactional version of {@link HTableInterface#getScanner(byte[] family)}
+     * @throws IOException 
+     */
+    public ResultScanner getScanner(byte[] family) throws IOException;
+
+    /**
+     * Transactional version of {@link HTableInterface#getScanner(byte[] family, byte[] qualifier)}
+     * @throws IOException 
+     */
+    public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
+
+    /**
+     * Transactional version of {@link HTableInterface#put(List puts)}
+     * @throws IOException 
+     */
+    public void put(List<Put> puts) throws IOException;
+
+    /**
+     * Transactional version of {@link HTableInterface#delete(List deletes)}
+     * @throws IOException
+     */
+    public void delete(List<Delete> deletes) throws IOException;
+    
+    /**
+     * Return the underlying HTable
+     */
+    public HTableInterface getHTable();
+
+    /**
+     * Delegates to {@link HTable#setAutoFlush(boolean autoFlush)}
+     */
+    public void setAutoFlush(boolean autoFlush);
+
+    /**
+     * Delegates to {@link HTable#isAutoFlush()}
+     */
+    public boolean isAutoFlush();
+
+    /**
+     * Delegates to see HTable.getWriteBufferSize()
+     */
+    public long getWriteBufferSize();
+
+    /**
+     * Delegates to see HTable.setWriteBufferSize()
+     */
+    public void setWriteBufferSize(long writeBufferSize) throws IOException;
+
+    /**
+     * Delegates to see HTable.flushCommits()
+     */
+    public void flushCommits() throws IOException;
+    
+    /**
+     * Releases resources
+     * @throws IOException 
+     */
+    public void close() throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
new file mode 100644
index 0000000..cfe8e9b
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -0,0 +1,95 @@
+package org.apache.phoenix.transaction;
+
+import java.sql.SQLException;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.phoenix.schema.PTable;
+
+public class TephraTransactionContext implements PhoenixTransactionContext {
+
+    @Override
+    public void begin() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void commit() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void abort() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void abort(SQLException e) throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void checkpoint() throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void commitDDL(PTable dataTable) throws SQLException,
+            InterruptedException, TimeoutException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void markDML(PTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void join(PhoenixTransactionContext ctx) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void addTransactionTable(PhoenixTransactionalTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void addTransactionToTable(PhoenixTransactionalTable table) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public boolean isTransactionRunning() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public void reset() {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public long getTransactionId() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public long getReadPointer() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c2ca8c5/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
new file mode 100644
index 0000000..09d37c3
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.transaction;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+
+public class TephraTransactionTable implements PhoenixTransactionalTable {
+
+    @Override
+    public Result get(Get get) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void put(Put put) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void delete(Delete delete) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public ResultScanner getScanner(Scan scan) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public byte[] getTableName() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public HTableDescriptor getTableDescriptor() throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean exists(Get get) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public Result[] get(List<Get> gets) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public ResultScanner getScanner(byte[] family) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public ResultScanner getScanner(byte[] family, byte[] qualifier)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void put(List<Put> puts) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void delete(List<Delete> deletes) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public HTableInterface getHTable() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void setAutoFlush(boolean autoFlush) {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public boolean isAutoFlush() {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public long getWriteBufferSize() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public void setWriteBufferSize(long writeBufferSize) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void flushCommits() throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void close() throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+}


[32/46] phoenix git commit: add a missing import

Posted by td...@apache.org.
add a missing import


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f5b19f16
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f5b19f16
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f5b19f16

Branch: refs/heads/omid
Commit: f5b19f16d44f315cbd8003718168231adcdde5fe
Parents: 34e2d36
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue Apr 25 12:00:13 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue Apr 25 12:00:13 2017 +0300

----------------------------------------------------------------------
 phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5b19f16/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index f1cf7df..78c510b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -43,6 +43,7 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;


[22/46] phoenix git commit: PHOENIX-3789 Execute cross region index maintenance calls in postBatchMutateIndispensably

Posted by td...@apache.org.
PHOENIX-3789 Execute cross region index maintenance calls in postBatchMutateIndispensably


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bcceaf80
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bcceaf80
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bcceaf80

Branch: refs/heads/omid
Commit: bcceaf80270b75a34a442f3ad1e7490bb88d58d5
Parents: adf5606
Author: James Taylor <ja...@apache.org>
Authored: Mon Apr 17 10:16:36 2017 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Apr 17 10:17:01 2017 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/hbase/index/Indexer.java   | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bcceaf80/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index c482cbd..f485bdf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -386,18 +386,20 @@ public class Indexer extends BaseRegionObserver {
   }
 
   @Override
-  public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+  public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
+      MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success) throws IOException {
       if (this.disabled) {
-          super.postBatchMutate(c, miniBatchOp);
+          super.postBatchMutateIndispensably(c, miniBatchOp, success);
           return;
         }
     this.builder.batchCompleted(miniBatchOp);
-
-    //each batch operation, only the first one will have anything useful, so we can just grab that
-    Mutation mutation = miniBatchOp.getOperation(0);
-    WALEdit edit = miniBatchOp.getWalEdit(0);
-    doPost(edit, mutation, mutation.getDurability(), false);
+    
+    if (success) { // if miniBatchOp was successfully written, write index updates
+        //each batch operation, only the first one will have anything useful, so we can just grab that
+        Mutation mutation = miniBatchOp.getOperation(0);
+        WALEdit edit = miniBatchOp.getWalEdit(0);
+        doPost(edit, mutation, mutation.getDurability(), false);
+    }
   }
 
   private void doPost(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates) throws IOException {


[44/46] phoenix git commit: Merge branch 'master' into ConnectTALtoPhoenix

Posted by td...@apache.org.
Merge branch 'master' into ConnectTALtoPhoenix


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c840cc98
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c840cc98
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c840cc98

Branch: refs/heads/omid
Commit: c840cc98fbf067551df4a3d78fa0c40d3629c820
Parents: 46e4b1a cd84de3
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue May 9 10:20:42 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue May 9 10:20:42 2017 +0300

----------------------------------------------------------------------
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  |  93 ++++--
 .../apache/phoenix/compile/UpsertCompiler.java  |  14 +-
 .../UngroupedAggregateRegionObserver.java       |  14 +-
 .../hbase/index/covered/data/LocalTable.java    |  22 +-
 .../covered/update/IndexUpdateManager.java      |  12 +-
 .../index/covered/update/SortedCollection.java  | 128 --------
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java     |   4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java |  14 +
 .../index/covered/TestNonTxIndexBuilder.java    | 317 +++++++++++++++++++
 .../index/covered/data/TestLocalTable.java      |  63 ++++
 11 files changed, 506 insertions(+), 178 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c840cc98/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index ea18401,49ef884..79634e8
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@@ -110,9 -110,9 +110,10 @@@ import org.apache.phoenix.schema.types.
  import org.apache.phoenix.schema.types.PDouble;
  import org.apache.phoenix.schema.types.PFloat;
  import org.apache.phoenix.schema.types.PLong;
 +import org.apache.phoenix.transaction.PhoenixTransactionContext;
  import org.apache.phoenix.util.ByteUtil;
  import org.apache.phoenix.util.EncodedColumnsUtil;
+ import org.apache.phoenix.util.ExpressionUtil;
  import org.apache.phoenix.util.IndexUtil;
  import org.apache.phoenix.util.KeyValueUtil;
  import org.apache.phoenix.util.LogUtil;


[46/46] phoenix git commit: Fix naming convention to getFamilyDeleteMarker

Posted by td...@apache.org.
Fix naming convention to getFamilyDeleteMarker


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/476d13d5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/476d13d5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/476d13d5

Branch: refs/heads/omid
Commit: 476d13d53733b1a9518db1babe25cd303f5aea58
Parents: ffac47e
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Wed May 10 14:46:47 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Wed May 10 14:46:47 2017 +0300

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/index/IndexMaintainer.java  | 2 +-
 .../src/main/java/org/apache/phoenix/schema/PTableImpl.java      | 4 ++--
 .../org/apache/phoenix/transaction/OmidTransactionContext.java   | 2 +-
 .../apache/phoenix/transaction/PhoenixTransactionContext.java    | 2 +-
 .../org/apache/phoenix/transaction/TephraTransactionContext.java | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/476d13d5/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index ab5b946..43e2bde 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -1064,7 +1064,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             }
         	else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
         			// Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
-        			|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
+        			|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
         	    nDeleteCF++;
         	}
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/476d13d5/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 3c1a01b..9710356 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -1023,11 +1023,11 @@ public class PTableImpl implements PTable {
             if (PTableImpl.this.isTransactional()) {
                 Put put = new Put(key);
                 if (families.isEmpty()) {
-                    put.add(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker(), ts,
+                    put.add(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker(), ts,
                             HConstants.EMPTY_BYTE_ARRAY);
                 } else {
                     for (PColumnFamily colFamily : families) {
-                        put.add(colFamily.getName().getBytes(), TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker(), ts,
+                        put.add(colFamily.getName().getBytes(), TransactionFactory.getTransactionFactory().getTransactionContext().getFamilyDeleteMarker(), ts,
                                 HConstants.EMPTY_BYTE_ARRAY);
                     }
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/476d13d5/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 2ae95bb..e217c28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -132,7 +132,7 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public byte[] get_famility_delete_marker() {
+    public byte[] getFamilyDeleteMarker() {
         // TODO Auto-generated method stub
         return null;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/476d13d5/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index a13b5a6..032b2d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -156,7 +156,7 @@ public interface PhoenixTransactionContext {
      * 
      * @return the family delete marker
      */
-    public byte[] get_famility_delete_marker(); 
+    public byte[] getFamilyDeleteMarker();
 
     /**
      * Setup transaction manager's configuration for testing

http://git-wip-us.apache.org/repos/asf/phoenix/blob/476d13d5/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 5c4178d..ad01309 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -415,7 +415,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public byte[] get_famility_delete_marker() { 
+    public byte[] getFamilyDeleteMarker() {
         return TxConstants.FAMILY_DELETE_QUALIFIER;
     }
 


[15/46] phoenix git commit: PHOENIX-3745 SortMergeJoin might incorrectly override the OrderBy of LHS or RHS

Posted by td...@apache.org.
PHOENIX-3745 SortMergeJoin might incorrectly override the OrderBy of LHS or RHS


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2c53fc98
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2c53fc98
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2c53fc98

Branch: refs/heads/omid
Commit: 2c53fc9856ba3770e742c0729cdef9b2c0181873
Parents: 2074d1f
Author: chenglei <ch...@apache.org>
Authored: Fri Mar 31 09:58:35 2017 +0800
Committer: chenglei <ch...@apache.org>
Committed: Fri Mar 31 09:58:35 2017 +0800

----------------------------------------------------------------------
 .../phoenix/end2end/SortMergeJoinMoreIT.java    | 135 +++++++++++++
 .../apache/phoenix/compile/JoinCompiler.java    |   6 +-
 .../phoenix/compile/SubselectRewriter.java      | 137 ++++++++++++--
 .../phoenix/exception/SQLExceptionCode.java     |   1 +
 .../phoenix/execute/DelegateQueryPlan.java      |   4 +
 .../phoenix/execute/SortMergeJoinPlan.java      |   8 +
 .../phoenix/compile/QueryCompilerTest.java      | 187 +++++++++++++++++++
 7 files changed, 466 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinMoreIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinMoreIT.java
index 992e55f..e61332b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinMoreIT.java
@@ -499,4 +499,139 @@ public class SortMergeJoinMoreIT extends ParallelStatsDisabledIT {
             conn.close();
         }
     }
+
+    @Test
+    public void testSubQueryOrderByOverrideBug3745() throws Exception {
+        Connection conn = null;
+        try {
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            conn = DriverManager.getConnection(getUrl(), props);
+
+            String tableName1=generateUniqueName();
+            String tableName2=generateUniqueName();
+
+            conn.createStatement().execute("DROP TABLE if exists "+tableName1);
+
+            String sql="CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+
+                    "AID INTEGER PRIMARY KEY,"+
+                    "AGE INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            conn.createStatement().execute("UPSERT INTO "+tableName1+"(AID,AGE) VALUES (1,11)");
+            conn.createStatement().execute("UPSERT INTO "+tableName1+"(AID,AGE) VALUES (2,22)");
+            conn.createStatement().execute("UPSERT INTO "+tableName1+"(AID,AGE) VALUES (3,33)");
+            conn.commit();
+
+            conn.createStatement().execute("DROP TABLE if exists "+tableName2);
+            sql="CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+
+                    "BID INTEGER PRIMARY KEY,"+
+                    "CODE INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            conn.createStatement().execute("UPSERT INTO "+tableName2+"(BID,CODE) VALUES (1,66)");
+            conn.createStatement().execute("UPSERT INTO "+tableName2+"(BID,CODE) VALUES (2,55)");
+            conn.createStatement().execute("UPSERT INTO "+tableName2+"(BID,CODE) VALUES (3,44)");
+            conn.commit();
+
+            //test for simple scan
+            sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33) a inner join "+
+                "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid ";
+
+            ResultSet rs=conn.prepareStatement(sql).executeQuery();
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(!rs.next());
+
+            sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33) a inner join "+
+                "(select bid,code from "+tableName2+" order by code limit 2) b on a.aid=b.bid ";
+            rs=conn.prepareStatement(sql).executeQuery();
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 2);
+            assertTrue(rs.getInt(2) == 55);
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(!rs.next());
+
+            //test for aggregate
+            sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from "+tableName1+" where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join "+
+                "(select bid,sum(code) codesum from "+tableName2+" group by bid order by codesum limit 2) b on a.aid=b.bid ";
+            rs=conn.prepareStatement(sql).executeQuery();
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 2);
+            assertTrue(rs.getInt(2) == 55);
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(!rs.next());
+
+            String tableName3=generateUniqueName();;
+            conn.createStatement().execute("DROP TABLE if exists "+tableName3);
+            sql="CREATE TABLE IF NOT EXISTS "+tableName3+" ( "+
+                    "CID INTEGER PRIMARY KEY,"+
+                    "REGION INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            conn.createStatement().execute("UPSERT INTO "+tableName3+"(CID,REGION) VALUES (1,77)");
+            conn.createStatement().execute("UPSERT INTO "+tableName3+"(CID,REGION) VALUES (2,88)");
+            conn.createStatement().execute("UPSERT INTO "+tableName3+"(CID,REGION) VALUES (3,99)");
+            conn.commit();
+
+            //test for join
+            sql="select t1.aid,t1.code,t2.region from "+
+                "(select a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join "+
+                "(select a.aid,c.region from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid";
+
+            rs=conn.prepareStatement(sql).executeQuery();
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(rs.getInt(3) == 99);
+            assertTrue(!rs.next());
+
+            //test for join and aggregate
+            sql="select t1.aid,t1.codesum,t2.regionsum from "+
+                "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join "+
+                "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid";
+
+            rs=conn.prepareStatement(sql).executeQuery();
+
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 2);
+            assertTrue(rs.getInt(2) == 55);
+            assertTrue(rs.getInt(3) == 88);
+
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(rs.getInt(3) == 99);
+            assertTrue(!rs.next());
+
+            //test for if SubselectRewriter.isOrderByPrefix had take effect
+            sql="select t1.aid,t1.codesum,t2.regionsum from "+
+                "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join "+
+                "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid "+
+                "order by t1.aid desc";
+
+            rs=conn.prepareStatement(sql).executeQuery();
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 3);
+            assertTrue(rs.getInt(2) == 44);
+            assertTrue(rs.getInt(3) == 99);
+
+            assertTrue(rs.next());
+            assertTrue(rs.getInt(1) == 2);
+            assertTrue(rs.getInt(2) == 55);
+            assertTrue(rs.getInt(3) == 88);
+            assertTrue(!rs.next());
+        } finally {
+            if(conn!=null) {
+                conn.close();
+            }
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index eef604b..b1da739 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -691,7 +691,11 @@ public class JoinCompiler {
 
         public SelectStatement getAsSubquery(List<OrderByNode> orderBy) throws SQLException {
             if (isSubselect())
-                return SubselectRewriter.applyOrderBy(SubselectRewriter.applyPostFilters(subselect, preFilters, tableNode.getAlias()), orderBy, tableNode.getAlias());
+                return SubselectRewriter.applyOrderBy(
+                        SubselectRewriter.applyPostFilters(subselect, preFilters, tableNode.getAlias()),
+                        orderBy,
+                        tableNode.getAlias(),
+                        tableNode);
 
             return NODE_FACTORY.select(tableNode, select.getHint(), false, selectNodes, getPreFiltersCombined(), null,
                     null, orderBy, null, null, 0, false, select.hasSequence(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
index db809c8..a926e06 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
@@ -19,11 +19,16 @@
 package org.apache.phoenix.compile;
 
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.AliasedNode;
 import org.apache.phoenix.parse.ColumnParseNode;
@@ -60,11 +65,11 @@ public class SubselectRewriter extends ParseNodeRewriter {
         return statement.getLimit() == null && (!statement.isAggregate() || !statement.getGroupBy().isEmpty());        
     }
     
-    public static SelectStatement applyOrderBy(SelectStatement statement, List<OrderByNode> orderBy, String subqueryAlias) throws SQLException {
+    public static SelectStatement applyOrderBy(SelectStatement statement, List<OrderByNode> orderBy, String subqueryAlias,TableNode tableNode) throws SQLException {
         if (orderBy == null)
             return statement;
         
-        return new SubselectRewriter(null, statement.getSelect(), subqueryAlias).applyOrderBy(statement, orderBy);
+        return new SubselectRewriter(null, statement.getSelect(), subqueryAlias).applyOrderBy(statement, orderBy, tableNode);
     }
     
     public static SelectStatement flatten(SelectStatement select, PhoenixConnection connection) throws SQLException {
@@ -248,17 +253,127 @@ public class SubselectRewriter extends ParseNodeRewriter {
         }
         return NODE_FACTORY.select(statement, statement.getWhere(), combine(postFiltersRewrite));
     }
-    
-    private SelectStatement applyOrderBy(SelectStatement statement, List<OrderByNode> orderBy) throws SQLException {
-        List<OrderByNode> orderByRewrite = Lists.<OrderByNode> newArrayListWithExpectedSize(orderBy.size());
-        for (OrderByNode orderByNode : orderBy) {
-            ParseNode node = orderByNode.getNode();
-            orderByRewrite.add(NODE_FACTORY.orderBy(node.accept(this), orderByNode.isNullsLast(), orderByNode.isAscending()));
+
+    private SelectStatement applyOrderBy(SelectStatement subselectStatement,List<OrderByNode> newOrderByNodes, TableNode subselectAsTableNode) throws SQLException {
+        ArrayList<OrderByNode> rewrittenNewOrderByNodes = Lists.<OrderByNode> newArrayListWithExpectedSize(newOrderByNodes.size());
+        for (OrderByNode newOrderByNode : newOrderByNodes) {
+            ParseNode parseNode = newOrderByNode.getNode();
+            rewrittenNewOrderByNodes.add(NODE_FACTORY.orderBy(
+                    parseNode.accept(this),
+                    newOrderByNode.isNullsLast(),
+                    newOrderByNode.isAscending()));
         }
-        
-        return NODE_FACTORY.select(statement, orderByRewrite);
+
+        // in these case,we can safely override subselect's orderBy
+        if(subselectStatement.getLimit()==null ||
+           subselectStatement.getOrderBy() == null ||
+           subselectStatement.getOrderBy().isEmpty()) {
+            return NODE_FACTORY.select(subselectStatement, rewrittenNewOrderByNodes);
+        }
+
+        //if rewrittenNewOrderByNodes is prefix of subselectStatement's orderBy,
+        //then subselectStatement no need to modify
+        if(this.isOrderByPrefix(subselectStatement, rewrittenNewOrderByNodes)) {
+            return subselectStatement;
+        }
+
+        //modify the subselect "(select id,code from tableName order by code limit 3) as a" to
+        //"(select id,code from (select id,code from tableName order by code limit 3) order by id) as a"
+        List<AliasedNode> newSelectAliasedNodes = createAliasedNodesFromSubselect(subselectStatement,rewrittenNewOrderByNodes);
+        assert subselectAsTableNode instanceof DerivedTableNode;
+        //set the subselect alias to null.
+        subselectAsTableNode=NODE_FACTORY.derivedTable(null, ((DerivedTableNode)subselectAsTableNode).getSelect());
+
+        return NODE_FACTORY.select(
+                subselectAsTableNode,
+                HintNode.EMPTY_HINT_NODE,
+                false,
+                newSelectAliasedNodes,
+                null,
+                null,
+                null,
+                rewrittenNewOrderByNodes,
+                null,
+                null,
+                0,
+                false,
+                subselectStatement.hasSequence(),
+                Collections.<SelectStatement> emptyList(),
+                subselectStatement.getUdfParseNodes());
     }
-    
+
+    /**
+     * create new aliasedNodes from subSelectStatement's select alias.
+     * @param subSelectStatement
+     * @param rewrittenOrderByNodes
+     * @return
+     */
+    private List<AliasedNode> createAliasedNodesFromSubselect(SelectStatement subSelectStatement,ArrayList<OrderByNode> rewrittenOrderByNodes) throws SQLException {
+        List<AliasedNode> selectAliasedNodes=subSelectStatement.getSelect();
+        List<AliasedNode> newSelectAliasedNodes = new ArrayList<AliasedNode>(selectAliasedNodes.size());
+        Map<ParseNode,Integer> rewrittenOrderByParseNodeToIndex=new HashMap<ParseNode, Integer>(rewrittenOrderByNodes.size());
+        for(int index=0;index < rewrittenOrderByNodes.size();index++) {
+            OrderByNode rewrittenOrderByNode=rewrittenOrderByNodes.get(index);
+            rewrittenOrderByParseNodeToIndex.put(rewrittenOrderByNode.getNode(), Integer.valueOf(index));
+        }
+
+        for (AliasedNode selectAliasedNode : selectAliasedNodes) {
+            String selectAliasName = selectAliasedNode.getAlias();
+            ParseNode oldSelectAliasParseNode = selectAliasedNode.getNode();
+            if (selectAliasName == null) {
+                selectAliasName = SchemaUtil.normalizeIdentifier(oldSelectAliasParseNode.getAlias());
+            }
+            //in order to convert the subselect "select id,sum(code) codesum from table group by id order by codesum limit 3"
+            //to "select id,codesum from (select id,sum(code) codesum from table group by id order by codesum limit 3) order by id"
+            //we must has alias for sum(code)
+            if(selectAliasName== null) {
+                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.SUBQUERY_SELECT_LIST_COLUMN_MUST_HAS_ALIAS)
+                 .setMessage("the subquery is:"+subSelectStatement)
+                 .build()
+                 .buildException();
+            }
+
+            ColumnParseNode newColumnParseNode=NODE_FACTORY.column(null, selectAliasName, selectAliasName);
+            Integer index=rewrittenOrderByParseNodeToIndex.get(oldSelectAliasParseNode);
+            if(index !=null) {
+                //replace the rewrittenOrderByNode's child to newColumnParseNode
+                OrderByNode oldOrderByNode=rewrittenOrderByNodes.get(index);
+                rewrittenOrderByNodes.set(index,
+                        NODE_FACTORY.orderBy(
+                                newColumnParseNode,
+                                oldOrderByNode.isNullsLast(),
+                                oldOrderByNode.isAscending()));
+            }
+
+            AliasedNode newSelectAliasNode=NODE_FACTORY.aliasedNode(null,newColumnParseNode);
+            newSelectAliasedNodes.add(newSelectAliasNode);
+        }
+        return newSelectAliasedNodes;
+    }
+
+    /**
+     * check if rewrittenNewOrderByNodes is prefix of selectStatement's order by.
+     * @param selectStatement
+     * @param rewrittenNewOrderByNodes
+     * @return
+     */
+    private boolean isOrderByPrefix(SelectStatement selectStatement,List<OrderByNode> rewrittenNewOrderByNodes) {
+        List<OrderByNode> existingOrderByNodes=selectStatement.getOrderBy();
+        if(rewrittenNewOrderByNodes.size() > existingOrderByNodes.size()) {
+            return false;
+        }
+
+        Iterator<OrderByNode> existingOrderByNodeIter=existingOrderByNodes.iterator();
+        for(OrderByNode rewrittenNewOrderByNode : rewrittenNewOrderByNodes) {
+            assert existingOrderByNodeIter.hasNext();
+            OrderByNode existingOrderByNode=existingOrderByNodeIter.next();
+            if(!existingOrderByNode.equals(rewrittenNewOrderByNode)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
     @Override
     public ParseNode visit(ColumnParseNode node) throws SQLException {
         if (node.getTableName() == null)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 1e48640..2836c45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -87,6 +87,7 @@ public enum SQLExceptionCode {
     SUBQUERY_RETURNS_DIFFERENT_NUMBER_OF_FIELDS(216, "22016", "Sub-query must return the same number of fields as the left-hand-side expression of 'IN'."),
     AMBIGUOUS_JOIN_CONDITION(217, "22017", "Ambiguous or non-equi join condition specified. Consider using table list with where clause."),
     CONSTRAINT_VIOLATION(218, "23018", "Constraint violation."),
+    SUBQUERY_SELECT_LIST_COLUMN_MUST_HAS_ALIAS(219,"23019","Every column in subquery select lists must has alias when used for join."),
 
     CONCURRENT_TABLE_MUTATION(301, "23000", "Concurrent modification to table.", new Factory() {
         @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
index 46eec91..015b8f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
@@ -137,4 +137,8 @@ public abstract class DelegateQueryPlan implements QueryPlan {
      public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
          return iterator(scanGrouper, null);
      }
+
+    public QueryPlan getDelegate() {
+        return delegate;
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index 8913f3b..75bd11c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -671,4 +671,12 @@ public class SortMergeJoinPlan implements QueryPlan {
     public Set<TableRef> getSourceRefs() {
         return tableRefs;
     }
+
+    public QueryPlan getLhsPlan() {
+        return lhsPlan;
+    }
+
+    public QueryPlan getRhsPlan() {
+        return rhsPlan;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c53fc98/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index fa270af..4bc7d2b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -51,7 +51,12 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.execute.AggregatePlan;
+import org.apache.phoenix.execute.ClientScanPlan;
 import org.apache.phoenix.execute.HashJoinPlan;
+import org.apache.phoenix.execute.ScanPlan;
+import org.apache.phoenix.execute.SortMergeJoinPlan;
+import org.apache.phoenix.execute.TupleProjectionPlan;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.aggregator.Aggregator;
@@ -3947,4 +3952,186 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         queryPlan.iterator();
         return queryPlan;
     }
+
+    @Test
+    public void testSortMergeJoinSubQueryOrderByOverrideBug3745() throws Exception {
+        Connection conn = null;
+        try {
+            conn= DriverManager.getConnection(getUrl());
+
+            String tableName1="MERGE1";
+            String tableName2="MERGE2";
+
+            conn.createStatement().execute("DROP TABLE if exists "+tableName1);
+
+            String sql="CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+
+                    "AID INTEGER PRIMARY KEY,"+
+                    "AGE INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            conn.createStatement().execute("DROP TABLE if exists "+tableName2);
+            sql="CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+
+                    "BID INTEGER PRIMARY KEY,"+
+                    "CODE INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            //test for simple scan
+            sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+
+                    "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid ";
+
+            QueryPlan queryPlan=getQueryPlan(conn, sql);
+            SortMergeJoinPlan sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate();
+
+            ClientScanPlan lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate();
+            OrderBy orderBy=lhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            ScanPlan innerScanPlan=(ScanPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate();
+            orderBy=innerScanPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AGE"));
+            assertTrue(innerScanPlan.getLimit().intValue() == 3);
+
+            ClientScanPlan rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate();
+            orderBy=rhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID"));
+            innerScanPlan=(ScanPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate();
+            orderBy=innerScanPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("CODE"));
+            assertTrue(innerScanPlan.getLimit().intValue() == 1);
+
+            //test for aggregate
+            sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from "+tableName1+" where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join "+
+                    "(select bid,sum(code) codesum from "+tableName2+" group by bid order by codesum limit 1) b on a.aid=b.bid ";
+
+
+            queryPlan=getQueryPlan(conn, sql);
+            sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate();
+
+            lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate();
+            orderBy=lhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            AggregatePlan innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(AGE)"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 3);
+
+            rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate();
+            orderBy=rhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID"));
+            innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(CODE)"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 1);
+
+            String tableName3="merge3";
+            conn.createStatement().execute("DROP TABLE if exists "+tableName3);
+            sql="CREATE TABLE IF NOT EXISTS "+tableName3+" ( "+
+                    "CID INTEGER PRIMARY KEY,"+
+                    "REGION INTEGER"+
+                    ")";
+            conn.createStatement().execute(sql);
+
+            //test for join
+            sql="select t1.aid,t1.code,t2.region from "+
+                "(select a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join "+
+                "(select a.aid,c.region from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid";
+
+            PhoenixPreparedStatement phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class);
+            queryPlan = phoenixPreparedStatement.optimizeQuery(sql);
+            sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate();
+
+            lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate();
+            orderBy=lhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate();
+            orderBy=innerScanPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("B.CODE"));
+            assertTrue(innerScanPlan.getLimit().intValue() == 3);
+
+            rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate();
+            orderBy=rhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate();
+            orderBy=innerScanPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("C.REGION DESC"));
+            assertTrue(innerScanPlan.getLimit().intValue() == 1);
+
+            //test for join and aggregate
+            sql="select t1.aid,t1.codesum,t2.regionsum from "+
+                "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join "+
+                "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid";
+
+            phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class);
+            queryPlan = phoenixPreparedStatement.optimizeQuery(sql);
+            sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate();
+
+            lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate();
+            orderBy=lhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(B.CODE)"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 3);
+
+            rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate();
+            orderBy=rhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(C.REGION) DESC"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 2);
+
+            //test whether SubselectRewriter.isOrderByPrefix has taken effect
+            sql="select t1.aid,t1.codesum,t2.regionsum from "+
+                "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join "+
+                "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid "+
+                 "order by t1.aid desc";
+
+            phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class);
+            queryPlan = phoenixPreparedStatement.optimizeQuery(sql);
+            orderBy=queryPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("T1.AID DESC"));
+            sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate();
+
+            innerAggregatePlan=(AggregatePlan)((HashJoinPlan)(((TupleProjectionPlan)sortMergeJoinPlan.getLhsPlan()).getDelegate())).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 2);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID"));
+            assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(B.CODE)"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 3);
+
+            rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate();
+            orderBy=rhsOuterPlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 1);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID"));
+            innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate();
+            orderBy=innerAggregatePlan.getOrderBy();
+            assertTrue(orderBy.getOrderByExpressions().size() == 2);
+            assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID DESC"));
+            assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(C.REGION) DESC"));
+            assertTrue(innerAggregatePlan.getLimit().intValue() == 2);
+        } finally {
+            if(conn!=null) {
+                conn.close();
+            }
+        }
+    }
 }


[09/46] phoenix git commit: Saving partial results

Posted by td...@apache.org.
Saving partial results


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fa69563e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fa69563e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fa69563e

Branch: refs/heads/omid
Commit: fa69563e51fbebdf14d5af610506dd56b8289ec4
Parents: d2c1653
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon Mar 13 12:22:51 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon Mar 13 12:22:51 2017 +0200

----------------------------------------------------------------------
 .../apache/phoenix/execute/MutationState.java   | 309 +++++++------------
 .../transaction/OmidTransactionContext.java     |  15 +-
 .../transaction/PhoenixTransactionContext.java  |  27 +-
 .../transaction/TephraTransactionContext.java   | 112 ++++---
 .../transaction/TephraTransactionTable.java     |   7 +-
 .../apache/phoenix/util/TransactionUtil.java    |  22 +-
 6 files changed, 230 insertions(+), 262 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 4775d59..c480e30 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -84,6 +84,10 @@ import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.trace.util.Tracing;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
+import org.apache.phoenix.transaction.PhoenixTransactionalTable;
+import org.apache.phoenix.transaction.TephraTransactionContext;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -123,54 +127,53 @@ public class MutationState implements SQLCloseable {
     private static final TransactionCodec CODEC = new TransactionCodec();
     private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0];
     private static final int MAX_COMMIT_RETRIES = 3;
-    
+
     private final PhoenixConnection connection;
     private final long maxSize;
     private final long maxSizeBytes;
     private long batchCount = 0L;
     private final Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations;
-    private final List<TransactionAware> txAwares;
-    private final TransactionContext txContext;
     private final Set<String> uncommittedPhysicalNames = Sets.newHashSetWithExpectedSize(10);
-    
-    private Transaction tx;
+
     private long sizeOffset;
     private int numRows = 0;
     private int[] uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY;
     private boolean isExternalTxContext = false;
     private Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> txMutations = Collections.emptyMap();
-    
+
+    final PhoenixTransactionContext phoenixTransactionContext;
+
     private final MutationMetricQueue mutationMetricQueue;
     private ReadMetricQueue readMetricQueue;
 
     public MutationState(long maxSize, PhoenixConnection connection) {
-        this(maxSize,connection, null, null);
+        this(maxSize,connection, false, null);
     }
-    
-    public MutationState(long maxSize, PhoenixConnection connection, TransactionContext txContext) {
-        this(maxSize,connection, null, txContext);
+
+    public MutationState(long maxSize, PhoenixConnection connection, PhoenixTransactionContext txContext) {
+        this(maxSize,connection, false, txContext);
     }
-    
+
     public MutationState(MutationState mutationState) {
-        this(mutationState.maxSize, mutationState.connection, mutationState.getTransaction(), null);
+        this(mutationState.maxSize, mutationState.connection, true, mutationState.getPhoenixTransactionContext());
     }
-    
+
     public MutationState(long maxSize, PhoenixConnection connection, long sizeOffset) {
-        this(maxSize, connection, null, null, sizeOffset);
+        this(maxSize, connection, false, null, sizeOffset);
     }
-    
-    private MutationState(long maxSize, PhoenixConnection connection, Transaction tx, TransactionContext txContext) {
-        this(maxSize,connection, tx, txContext, 0);
+
+    private MutationState(long maxSize, PhoenixConnection connection, boolean subTask, PhoenixTransactionContext txContext) {
+        this(maxSize,connection, subTask, txContext, 0);
     }
-    
-    private MutationState(long maxSize, PhoenixConnection connection, Transaction tx, TransactionContext txContext, long sizeOffset) {
-        this(maxSize, connection, Maps.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>newHashMapWithExpectedSize(5), tx, txContext);
+
+    private MutationState(long maxSize, PhoenixConnection connection, boolean subTask, PhoenixTransactionContext txContext, long sizeOffset) {
+        this(maxSize, connection, Maps.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>newHashMapWithExpectedSize(5), subTask, txContext);
         this.sizeOffset = sizeOffset;
     }
-    
+
     MutationState(long maxSize, PhoenixConnection connection,
             Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations,
-            Transaction tx, TransactionContext txContext) {
+            boolean subTask, PhoenixTransactionContext txContext) {
         this.maxSize = maxSize;
         this.connection = connection;
         this.maxSizeBytes = connection.getMutateBatchSizeBytes();
@@ -178,30 +181,24 @@ public class MutationState implements SQLCloseable {
         boolean isMetricsEnabled = connection.isRequestLevelMetricsEnabled();
         this.mutationMetricQueue = isMetricsEnabled ? new MutationMetricQueue()
                 : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE;
-        this.tx = tx;
-        if (tx == null) {
-            this.txAwares = Collections.emptyList();
+        if (subTask == false) {
             if (txContext == null) {
-                TransactionSystemClient txServiceClient = this.connection
-                        .getQueryServices().getTransactionSystemClient();
-                this.txContext = new TransactionContext(txServiceClient);
+                phoenixTransactionContext = new TephraTransactionContext(connection);
             } else {
                 isExternalTxContext = true;
-                this.txContext = txContext;
+                phoenixTransactionContext = new TephraTransactionContext(txContext, connection, subTask);
             }
         } else {
             // this code path is only used while running child scans, we can't pass the txContext to child scans
             // as it is not thread safe, so we use the tx member variable
-            this.txAwares = Lists.newArrayList();
-            this.txContext = null;
+            phoenixTransactionContext = new TephraTransactionContext(txContext, connection, subTask);
         }
     }
 
     public MutationState(TableRef table, Map<ImmutableBytesPtr,RowMutationState> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
-        this(maxSize, connection, null, null, sizeOffset);
+        this(maxSize, connection, true, connection.getMutationState().getPhoenixTransactionContext(), sizeOffset);
         this.mutations.put(table, mutations);
         this.numRows = mutations.size();
-        this.tx = connection.getMutationState().getTransaction();
         throwIfTooBig();
     }
     
@@ -209,6 +206,10 @@ public class MutationState implements SQLCloseable {
         return maxSize;
     }
     
+    public PhoenixTransactionContext getPhoenixTransactionContext() {
+        return phoenixTransactionContext;
+    }
+    
     /**
      * Commit a write fence when creating an index so that we can detect
      * when a data table transaction is started before the create index
@@ -219,33 +220,16 @@ public class MutationState implements SQLCloseable {
      * @param dataTable the data table upon which an index is being added
      * @throws SQLException
      */
-    public void commitDDLFence(PTable dataTable) throws SQLException {
+    public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
         if (dataTable.isTransactional()) {
-            byte[] key = dataTable.getName().getBytes();
-            boolean success = false;
             try {
-                FenceWait fenceWait = VisibilityFence.prepareWait(key, connection.getQueryServices().getTransactionSystemClient());
-                fenceWait.await(10000, TimeUnit.MILLISECONDS);
-                success = true;
-            } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
-            } catch (TimeoutException | TransactionFailureException e) {
-                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
-                .setSchemaName(dataTable.getSchemaName().getString())
-                .setTableName(dataTable.getTableName().getString())
-                .build().buildException();
+                phoenixTransactionContext.commitDDLFence(dataTable, logger);
             } finally {
                 // The client expects a transaction to be in progress on the txContext while the
                 // VisibilityFence.prepareWait() starts a new tx and finishes/aborts it. After it's
                 // finished, we start a new one here.
                 // TODO: seems like an autonomous tx capability in Tephra would be useful here.
-                try {
-                    txContext.start();
-                    if (logger.isInfoEnabled() && success) logger.info("Added write fence at ~" + getTransaction().getReadPointer());
-                } catch (TransactionFailureException e) {
-                    throw TransactionUtil.getTransactionFailureException(e);
-                }
+                phoenixTransactionContext.begin();
             }
         }
     }
@@ -262,27 +246,12 @@ public class MutationState implements SQLCloseable {
         if (table.getType() == PTableType.INDEX || !table.isTransactional()) {
             return;
         }
-        byte[] logicalKey = table.getName().getBytes();
-        TransactionAware logicalTxAware = VisibilityFence.create(logicalKey);
-        if (this.txContext == null) {
-            this.txAwares.add(logicalTxAware);
-        } else {
-            this.txContext.addTransactionAware(logicalTxAware);
-        }
-        byte[] physicalKey = table.getPhysicalName().getBytes();
-        if (Bytes.compareTo(physicalKey, logicalKey) != 0) {
-            TransactionAware physicalTxAware = VisibilityFence.create(physicalKey);
-            if (this.txContext == null) {
-                this.txAwares.add(physicalTxAware);
-            } else {
-                this.txContext.addTransactionAware(physicalTxAware);
-            }
-        }
+
+        phoenixTransactionContext.markDMLFence(table);
     }
     
     public boolean checkpointIfNeccessary(MutationPlan plan) throws SQLException {
-        Transaction currentTx = getTransaction();
-        if (getTransaction() == null || plan.getTargetRef() == null || plan.getTargetRef().getTable() == null || !plan.getTargetRef().getTable().isTransactional()) {
+        if (! phoenixTransactionContext.isTransactionRunning()  || plan.getTargetRef() == null || plan.getTargetRef().getTable() == null || !plan.getTargetRef().getTable().isTransactional()) {
             return false;
         }
         Set<TableRef> sources = plan.getSourceRefs();
@@ -322,40 +291,14 @@ public class MutationState implements SQLCloseable {
                     break;
                 }
             }
-            if (hasUncommittedData) {
-                try {
-                    if (txContext == null) {
-                        currentTx = tx = connection.getQueryServices().getTransactionSystemClient().checkpoint(currentTx);
-                    }  else {
-                        txContext.checkpoint();
-                        currentTx = tx = txContext.getCurrentTransaction();
-                    }
-                    // Since we've checkpointed, we can clear out uncommitted set, since a statement run afterwards
-                    // should see all this data.
-                    uncommittedPhysicalNames.clear();
-                } catch (TransactionFailureException e) {
-                    throw new SQLException(e);
-                } 
-            }
-            // Since we're querying our own table while mutating it, we must exclude
-            // see our current mutations, otherwise we can get erroneous results (for DELETE)
-            // or get into an infinite loop (for UPSERT SELECT).
-            currentTx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
+
+            phoenixTransactionContext.checkpoint(hasUncommittedData);
+
             return true;
         }
         return false;
     }
-    
-    private void addTransactionParticipant(TransactionAware txAware) throws SQLException {
-        if (txContext == null) {
-            txAwares.add(txAware);
-            assert(tx != null);
-            txAware.startTx(tx);
-        } else {
-            txContext.addTransactionAware(txAware);
-        }
-    }
-    
+
     // Though MutationState is not thread safe in general, this method should be because it may
     // be called by TableResultIterator in a multi-threaded manner. Since we do not want to expose
     // the Transaction outside of MutationState, this seems reasonable, as the member variables
@@ -372,68 +315,52 @@ public class MutationState implements SQLCloseable {
         }
         return htable;
     }
-    
+
     public PhoenixConnection getConnection() {
         return connection;
     }
-    
-    // Kept private as the Transaction may change when check pointed. Keeping it private ensures
-    // no one holds on to a stale copy.
-    private Transaction getTransaction() {
-        return tx != null ? tx : txContext != null ? txContext.getCurrentTransaction() : null;
-    }
-    
+
     public boolean isTransactionStarted() {
-        return getTransaction() != null;
+        return phoenixTransactionContext.isTransactionRunning();
     }
-    
+
     public long getInitialWritePointer() {
-        Transaction tx = getTransaction();
-        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getTransactionId(); // First write pointer - won't change with checkpointing
+        return phoenixTransactionContext.getTransactionId(); // First write pointer - won't change with checkpointing
     }
-    
+
     // For testing
     public long getWritePointer() {
-        Transaction tx = getTransaction();
-        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getWritePointer();
+        return phoenixTransactionContext.getWritePointer();
     }
-    
+
     // For testing
-    public VisibilityLevel getVisibilityLevel() {
-        Transaction tx = getTransaction();
-        return tx == null ? null : tx.getVisibilityLevel();
+    public PhoenixVisibilityLevel getVisibilityLevel() {
+        return phoenixTransactionContext.getVisibilityLevel();
     }
-    
+
     public boolean startTransaction() throws SQLException {
-        if (txContext == null) {
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
-        }
-        
         if (connection.getSCN() != null) {
             throw new SQLExceptionInfo.Builder(
                     SQLExceptionCode.CANNOT_START_TRANSACTION_WITH_SCN_SET)
                     .build().buildException();
         }
-        
-        try {
-            if (!isTransactionStarted()) {
-                // Clear any transactional state in case transaction was ended outside
-                // of Phoenix so we don't carry the old transaction state forward. We
-                // cannot call reset() here due to the case of having mutations and
-                // then transitioning from non transactional to transactional (which
-                // would end up clearing our uncommitted state).
-                resetTransactionalState();
-                txContext.start();
-                return true;
-            }
-        } catch (TransactionFailureException e) {
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED).setRootCause(e).build().buildException();
+
+        if (!isTransactionStarted()) {
+            // Clear any transactional state in case transaction was ended outside
+            // of Phoenix so we don't carry the old transaction state forward. We
+            // cannot call reset() here due to the case of having mutations and
+            // then transitioning from non transactional to transactional (which
+            // would end up clearing our uncommitted state).
+            resetTransactionalState();
+            phoenixTransactionContext.begin();
+            return true;
         }
+
         return false;
     }
 
     public static MutationState emptyMutationState(long maxSize, PhoenixConnection connection) {
-        MutationState state = new MutationState(maxSize, connection, Collections.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>emptyMap(), null, null);
+        MutationState state = new MutationState(maxSize, connection, Collections.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>emptyMap(), false, null);
         state.sizeOffset = 0;
         return state;
     }
@@ -512,13 +439,9 @@ public class MutationState implements SQLCloseable {
         if (this == newMutationState) { // Doesn't make sense
             return;
         }
-        if (txContext != null) {
-            for (TransactionAware txAware : newMutationState.txAwares) {
-                txContext.addTransactionAware(txAware);
-            }
-        } else {
-            txAwares.addAll(newMutationState.txAwares);
-        }
+
+        phoenixTransactionContext.join(getPhoenixTransactionContext());
+
         this.sizeOffset += newMutationState.sizeOffset;
         joinMutationState(newMutationState.mutations, this.mutations);
         if (!newMutationState.txMutations.isEmpty()) {
@@ -535,7 +458,7 @@ public class MutationState implements SQLCloseable {
         }
         throwIfTooBig();
     }
-    
+
 
     private static ImmutableBytesPtr getNewRowKeyWithRowTimestamp(ImmutableBytesPtr ptr, long rowTimestamp, PTable table) {
         RowKeySchema schema = table.getRowKeySchema();
@@ -1054,24 +977,15 @@ public class MutationState implements SQLCloseable {
                             txTableRefs.add(origTableRef);
                             addDMLFence(table);
                             uncommittedPhysicalNames.add(table.getPhysicalName().getString());
-                            
+
                             // If we have indexes, wrap the HTable in a delegate HTable that
                             // will attach the necessary index meta data in the event of a
                             // rollback
                             if (!table.getIndexes().isEmpty()) {
                                 hTable = new MetaDataAwareHTable(hTable, origTableRef);
                             }
-                            TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
-                            // Don't add immutable indexes (those are the only ones that would participate
-                            // during a commit), as we don't need conflict detection for these.
-                            if (tableInfo.isDataTable()) {
-                                // Even for immutable, we need to do this so that an abort has the state
-                                // necessary to generate the rows to delete.
-                                addTransactionParticipant(txnAware);
-                            } else {
-                                txnAware.startTx(getTransaction());
-                            }
-                            hTable = txnAware;
+
+                            hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table.isImmutableRows());                          
                         }
                         
                         long numMutations = mutationList.size();
@@ -1261,29 +1175,22 @@ public class MutationState implements SQLCloseable {
         this.mutations.clear();
         resetTransactionalState();
     }
-    
+
     private void resetTransactionalState() {
-        tx = null;
-        txAwares.clear();
+        phoenixTransactionContext.reset();
         txMutations = Collections.emptyMap();
         uncommittedPhysicalNames.clear();
         uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY;
     }
-    
+
     public void rollback() throws SQLException {
         try {
-            if (txContext != null && isTransactionStarted()) {
-                try {
-                    txContext.abort();
-                } catch (TransactionFailureException e) {
-                    throw TransactionUtil.getTransactionFailureException(e);
-                }
-            }
+            phoenixTransactionContext.abort();
         } finally {
             resetState();
         }
     }
-    
+
     public void commit() throws SQLException {
         Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> txMutations = Collections.emptyMap();
         int retryCount = 0;
@@ -1299,38 +1206,32 @@ public class MutationState implements SQLCloseable {
                 sqlE = e;
             } finally {
                 try {
-                    if (txContext != null && isTransactionStarted()) {
-                        TransactionFailureException txFailure = null;
-                        boolean finishSuccessful=false;
-                        try {
-                            if (sendSuccessful) {
-                                txContext.finish();
-                                finishSuccessful = true;
-                            }
-                        } catch (TransactionFailureException e) {
-                            if (logger.isInfoEnabled()) logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount);
-                            retryCommit = (e instanceof TransactionConflictException && retryCount < MAX_COMMIT_RETRIES);
-                            txFailure = e;
-                            SQLException nextE = TransactionUtil.getTransactionFailureException(e);
-                            if (sqlE == null) {
-                                sqlE = nextE;
-                            } else {
-                                sqlE.setNextException(nextE);
-                            }
-                        } finally {
-                            // If send fails or finish fails, abort the tx
-                            if (!finishSuccessful) {
-                                try {
-                                    txContext.abort(txFailure);
-                                    if (logger.isInfoEnabled()) logger.info("Abort successful");
-                                } catch (TransactionFailureException e) {
-                                    if (logger.isInfoEnabled()) logger.info("Abort failed with " + e);
-                                    SQLException nextE = TransactionUtil.getTransactionFailureException(e);
-                                    if (sqlE == null) {
-                                        sqlE = nextE;
-                                    } else {
-                                        sqlE.setNextException(nextE);
-                                    }
+                    boolean finishSuccessful=false;
+                    try {
+                        if (sendSuccessful) {
+                            phoenixTransactionContext.commit();
+                            finishSuccessful = true;
+                        }
+                    } catch (SQLException e) {
+                        if (logger.isInfoEnabled()) logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount);
+                        retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode() && retryCount < MAX_COMMIT_RETRIES);
+                        if (sqlE == null) {
+                            sqlE = e;
+                        } else {
+                            sqlE.setNextException(e);
+                        }
+                    } finally {
+                        // If send fails or finish fails, abort the tx
+                        if (!finishSuccessful) {
+                            try {
+                                phoenixTransactionContext.abort();
+                                if (logger.isInfoEnabled()) logger.info("Abort successful");
+                            } catch (SQLException e) {
+                                if (logger.isInfoEnabled()) logger.info("Abort failed with " + e);
+                                if (sqlE == null) {
+                                    sqlE = e;
+                                } else {
+                                    sqlE.setNextException(e);
                                 }
                             }
                         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 937ac14..596cf73 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -4,6 +4,7 @@ import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.phoenix.schema.PTable;
+import org.slf4j.Logger;
 
 public class OmidTransactionContext implements PhoenixTransactionContext {
 
@@ -32,8 +33,7 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void commitDDLFence(PTable dataTable) throws SQLException,
-            InterruptedException, TimeoutException {
+    public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
         // TODO Auto-generated method stub
 
     }
@@ -74,4 +74,15 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
         return 0;
     }
 
+    @Override
+    public long getWritePointer() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public PhoenixVisibilityLevel getVisibilityLevel() {
+        // TODO Auto-generated method stub
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 87b68f9..2d0d5b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -1,6 +1,8 @@
 package org.apache.phoenix.transaction;
 
 import org.apache.phoenix.schema.PTable;
+import org.apache.tephra.Transaction.VisibilityLevel;
+import org.slf4j.Logger;
 
 import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
@@ -8,6 +10,17 @@ import java.util.concurrent.TimeoutException;
 public interface PhoenixTransactionContext {
 
     /**
+     * 
+     * Visibility levels needed for checkpointing and  
+     *
+     */
+    public enum PhoenixVisibilityLevel {
+        SNAPSHOT,
+        SNAPSHOT_EXCLUDE_CURRENT,
+        SNAPSHOT_ALL
+      }
+
+    /**
      * Starts a transaction
      *
      * @throws SQLException
@@ -43,8 +56,8 @@ public interface PhoenixTransactionContext {
      * @throws InterruptedException
      * @throws TimeoutException
      */
-    public void commitDDLFence(PTable dataTable)
-            throws SQLException, InterruptedException, TimeoutException;
+    public void commitDDLFence(PTable dataTable, Logger logger)
+            throws SQLException;
 
     /**
      * mark DML with table information for conflict detection of concurrent
@@ -80,4 +93,14 @@ public interface PhoenixTransactionContext {
      * Returns transaction snapshot id
      */
     long getReadPointer();
+
+    /**
+     * Returns transaction write pointer. After checkpoint the write pointer is different than the initial one  
+     */
+    long getWritePointer();
+
+    /**
+     * Returns visibility level 
+     */
+    PhoenixVisibilityLevel getVisibilityLevel();    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 8fc5e0f..f8096d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -24,6 +24,8 @@ import org.apache.tephra.visibility.VisibilityFence;
 
 import com.google.common.collect.Lists;
 
+import org.slf4j.Logger;
+
 public class TephraTransactionContext implements PhoenixTransactionContext {
 
     private final List<TransactionAware> txAwares;
@@ -32,24 +34,26 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     private TransactionSystemClient txServiceClient;
     private TransactionFailureException e;
 
-    public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean threadSafe) {
 
+    public TephraTransactionContext(PhoenixConnection connection) {
+        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
+        this.txAwares = Collections.emptyList();
+        this.txContext = new TransactionContext(txServiceClient);
+    }
+
+    public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean subTask) {
         this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
 
         assert(ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
 
-        if (threadSafe) {
+        if (subTask) {
             this.tx = tephraTransactionContext.getTransaction();
             this.txAwares = Lists.newArrayList();
             this.txContext = null;
         } else {
             this.txAwares = Collections.emptyList();
-            if (ctx == null) {
-                this.txContext = new TransactionContext(txServiceClient);
-            } else {
-                this.txContext = tephraTransactionContext.getContext();
-            }
+            this.txContext = tephraTransactionContext.getContext();
         }
 
         this.e = null;
@@ -73,8 +77,12 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     @Override
     public void commit() throws SQLException {
+        
+        if (txContext == null || !isTransactionRunning()) {
+            return;
+        }
+        
         try {
-            assert(txContext != null);
             txContext.finish();
         } catch (TransactionFailureException e) {
             this.e = e;
@@ -93,6 +101,11 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     @Override
     public void abort() throws SQLException {
+        
+        if (txContext == null || !isTransactionRunning()) {
+            return;
+        }
+            
         try {
             if (e != null) {
                 txContext.abort(e);
@@ -125,6 +138,9 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             }
         }
 
+        // Since we're querying our own table while mutating it, we must exclude
+        // see our current mutations, otherwise we can get erroneous results (for DELETE)
+        // or get into an infinite loop (for UPSERT SELECT).
         if (txContext == null) {
             tx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
         }
@@ -135,12 +151,16 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void commitDDLFence(PTable dataTable) throws SQLException,
-            InterruptedException, TimeoutException {
+    public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
         byte[] key = dataTable.getName().getBytes();
+
         try {
             FenceWait fenceWait = VisibilityFence.prepareWait(key, txServiceClient);
             fenceWait.await(10000, TimeUnit.MILLISECONDS);
+            
+            if (logger.isInfoEnabled()) {
+                logger.info("Added write fence at ~" + getTransaction().getReadPointer());
+            }
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
@@ -156,11 +176,13 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     public void markDMLFence(PTable table) {
         byte[] logicalKey = table.getName().getBytes();
         TransactionAware logicalTxAware = VisibilityFence.create(logicalKey);
+
         if (this.txContext == null) {
             this.txAwares.add(logicalTxAware);
         } else {
             this.txContext.addTransactionAware(logicalTxAware);
         }
+
         byte[] physicalKey = table.getPhysicalName().getBytes();
         if (Bytes.compareTo(physicalKey, logicalKey) != 0) {
             TransactionAware physicalTxAware = VisibilityFence.create(physicalKey);
@@ -233,6 +255,48 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         return (-1);
     }
 
+    // For testing
+    @Override
+    public long getWritePointer() {
+        if (this.txContext != null) {
+            return txContext.getCurrentTransaction().getWritePointer();
+        }
+
+        if (tx != null) {
+            return tx.getWritePointer();
+        }
+
+        return HConstants.LATEST_TIMESTAMP;
+    }
+
+    // For testing
+    @Override
+    public PhoenixVisibilityLevel getVisibilityLevel() {
+        VisibilityLevel visibilityLevel = null;
+
+        if (this.txContext != null) {
+            visibilityLevel = txContext.getCurrentTransaction().getVisibilityLevel();
+        } else if (tx != null) {
+            visibilityLevel = tx.getVisibilityLevel();
+        }
+
+        PhoenixVisibilityLevel phoenixVisibilityLevel;
+        switch(visibilityLevel) {
+        case SNAPSHOT:
+            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT;
+            break;
+        case SNAPSHOT_EXCLUDE_CURRENT:
+            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
+            break;
+        case SNAPSHOT_ALL:
+            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL;
+        default:
+            phoenixVisibilityLevel = null;
+        }
+
+        return phoenixVisibilityLevel;
+    }
+
    /**
     * TephraTransactionContext specific functions
     */
@@ -254,32 +318,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             txContext.addTransactionAware(txAware);
         } else if (this.tx != null) {
             txAwares.add(txAware);
+            assert(tx != null);
+            txAware.startTx(tx);
         }
     }
-
-    // For testing
-    public long getWritePointer() {
-        if (this.txContext != null) {
-            return txContext.getCurrentTransaction().getWritePointer();
-        }
-
-        if (tx != null) {
-            return tx.getWritePointer();
-        }
-
-        return HConstants.LATEST_TIMESTAMP;
-    }
-
-    // For testing
-    public VisibilityLevel getVisibilityLevel() {
-        if (this.txContext != null) {
-            return txContext.getCurrentTransaction().getVisibilityLevel();
-        }
-
-        if (tx != null) {
-            return tx.getVisibilityLevel();
-        }
-
-        return null;
-    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index 50ea600..e33a280 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.tephra.TxConstants;
 import org.apache.tephra.hbase.TransactionAwareHTable;
 
 import com.google.protobuf.Descriptors.MethodDescriptor;
@@ -37,12 +38,16 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     private TephraTransactionContext tephraTransactionContext;
 
     public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
+        this(ctx, hTable, false);
+    }
+
+    public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable, boolean isImmutableRows) {
 
         assert(ctx instanceof TephraTransactionContext);
 
         tephraTransactionContext = (TephraTransactionContext) ctx;
 
-        transactionAwareHTable = new TransactionAwareHTable(hTable);
+        transactionAwareHTable = new TransactionAwareHTable(hTable, isImmutableRows ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
 
         tephraTransactionContext.addTransactionAware(transactionAwareHTable);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fa69563e/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 04882e0..4fbbe57 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -29,6 +29,9 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.PhoenixTransactionalTable;
+import org.apache.phoenix.transaction.TephraTransactionTable;
 import org.apache.tephra.TransactionConflictException;
 import org.apache.tephra.TransactionFailureException;
 import org.apache.tephra.TxConstants;
@@ -50,23 +53,8 @@ public class TransactionUtil {
         return serverTimeStamp / TxConstants.MAX_TX_PER_MS;
     }
     
-    public static SQLException getTransactionFailureException(TransactionFailureException e) {
-        if (e instanceof TransactionConflictException) { 
-            return new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
-                .setMessage(e.getMessage())
-                .setRootCause(e)
-                .build().buildException();
-
-        }
-        return new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
-            .setMessage(e.getMessage())
-            .setRootCause(e)
-            .build().buildException();
-    }
-    
-    public static TransactionAwareHTable getTransactionAwareHTable(HTableInterface htable, boolean isImmutableRows) {
-    	// Conflict detection is not needed for tables with write-once/append-only data
-    	return new TransactionAwareHTable(htable, isImmutableRows ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
+    public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, HTableInterface htable, boolean isImmutableRows) {
+        return new TephraTransactionTable(phoenixTransactionContext, htable, isImmutableRows);
     }
     
     // we resolve transactional tables at the txn read pointer


[31/46] phoenix git commit: PHOENIX-3759 Dropping a local index causes NPE

Posted by td...@apache.org.
PHOENIX-3759 Dropping a local index causes NPE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/92b951e5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/92b951e5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/92b951e5

Branch: refs/heads/omid
Commit: 92b951e5387768e084ed09729884a59160cd81d3
Parents: 90e32c0
Author: Ankit Singhal <an...@gmail.com>
Authored: Fri Apr 21 11:48:54 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Fri Apr 21 11:48:54 2017 +0530

----------------------------------------------------------------------
 .../apache/phoenix/end2end/index/LocalIndexIT.java   | 15 ++++++++++++---
 .../java/org/apache/phoenix/util/RepairUtil.java     | 11 +++++++----
 2 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/92b951e5/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 8d3316b..ea4780b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -599,21 +599,30 @@ public class LocalIndexIT extends BaseLocalIndexIT {
             admin.disableTable(tableName);
             copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
             copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
-
             admin.enableTable(tableName);
 
             int count=getCount(conn, tableName, "L#0");
             assertTrue(count > 14);
-            admin.majorCompact(tableName);
+            admin.majorCompact(TableName.valueOf(tableName));
             int tryCount = 5;// need to wait for rebuilding of corrupted local index region
             while (tryCount-- > 0 && count != 14) {
-                Thread.sleep(30000);
+                Thread.sleep(15000);
                 count = getCount(conn, tableName, "L#0");
             }
             assertEquals(14, count);
             rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName1);
             assertTrue(rs.next());
             assertEquals(7, rs.getLong(1));
+            statement.execute("DROP INDEX " + indexName1 + " ON " + tableName);
+            admin.majorCompact(TableName.valueOf(tableName));
+            statement.execute("DROP INDEX " + indexName + " ON " + tableName);
+            admin.majorCompact(TableName.valueOf(tableName));
+            Thread.sleep(15000);
+            admin.majorCompact(TableName.valueOf(tableName));
+            Thread.sleep(15000);
+            rs = statement.executeQuery("SELECT COUNT(*) FROM " + tableName);
+            assertTrue(rs.next());
+            
         }
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92b951e5/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
index b9b7526..ea14715 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
@@ -29,10 +29,13 @@ public class RepairUtil {
         byte[] endKey = environment.getRegion().getRegionInfo().getEndKey();
         byte[] indexKeyEmbedded = startKey.length == 0 ? new byte[endKey.length] : startKey;
         for (StoreFile file : store.getStorefiles()) {
-            byte[] fileFirstRowKey = KeyValue.createKeyValueFromKey(file.getReader().getFirstKey()).getRow();;
-            if ((fileFirstRowKey != null && Bytes.compareTo(file.getReader().getFirstKey(), 0, indexKeyEmbedded.length,
-                    indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)
-                    /*|| (endKey.length > 0 && Bytes.compareTo(file.getLastKey(), endKey) < 0)*/) { return false; }
+            if (file.getReader() != null && file.getReader().getFirstKey() != null) {
+                byte[] fileFirstRowKey = KeyValue.createKeyValueFromKey(file.getReader().getFirstKey()).getRow();
+                ;
+                if ((fileFirstRowKey != null && Bytes.compareTo(file.getReader().getFirstKey(), 0,
+                        indexKeyEmbedded.length, indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)
+                /* || (endKey.length > 0 && Bytes.compareTo(file.getLastKey(), endKey) < 0) */) { return false; }
+            }
         }
         return true;
     }


[41/46] phoenix git commit: PHOENIX-3824 Mutable Index partial rebuild should add only one index row per updated data row.

Posted by td...@apache.org.
PHOENIX-3824 Mutable Index partial rebuild should add only one index row per updated data row.

Signed-off-by: Lars Hofhansl <la...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/85e344fd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/85e344fd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/85e344fd

Branch: refs/heads/omid
Commit: 85e344fdfcc65d4992336eb52868d7ba78ba55d1
Parents: a1d3c16
Author: Vincent Poon <vi...@gmail.com>
Authored: Mon May 8 17:18:54 2017 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Mon May 8 17:18:54 2017 -0700

----------------------------------------------------------------------
 .../hbase/index/covered/data/LocalTable.java    |  22 +-
 .../index/covered/TestNonTxIndexBuilder.java    | 317 +++++++++++++++++++
 .../index/covered/data/TestLocalTable.java      |  63 ++++
 3 files changed, 401 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/85e344fd/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 003df2a..85c54ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -33,6 +33,10 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Ordering;
+import com.google.common.primitives.Longs;
+
 /**
  * Wrapper around a lazily instantiated, local HTable.
  * <p>
@@ -61,7 +65,8 @@ public class LocalTable implements LocalHBaseState {
     if (ignoreNewerMutations) {
         // Provides a means of client indicating that newer cells should not be considered,
         // enabling mutations to be replayed to partially rebuild the index when a write fails.
-        long ts = m.getFamilyCellMap().firstEntry().getValue().get(0).getTimestamp();
+        // When replaying mutations we want the oldest timestamp (as anything newer we be replayed)
+        long ts = getOldestTimestamp(m.getFamilyCellMap().values());
         s.setTimeRange(0,ts);
     }
     Region region = this.env.getRegion();
@@ -74,4 +79,19 @@ public class LocalTable implements LocalHBaseState {
     scanner.close();
     return r;
   }
+
+    // Returns the smallest timestamp in the given cell lists.
+    // It is assumed that the lists have cells ordered from largest to smallest timestamp
+    protected long getOldestTimestamp(Collection<List<Cell>> cellLists) {
+        Ordering<List<Cell>> cellListOrdering = new Ordering<List<Cell>>() {
+            @Override
+            public int compare(List<Cell> left, List<Cell> right) {
+                // compare the last element of each list, since that is the smallest in that list
+                return Longs.compare(Iterables.getLast(left).getTimestamp(),
+                    Iterables.getLast(right).getTimestamp());
+            }
+        };
+        List<Cell> minList = cellListOrdering.min(cellLists);
+        return Iterables.getLast(minList).getTimestamp();
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/85e344fd/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestNonTxIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestNonTxIndexBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestNonTxIndexBuilder.java
new file mode 100644
index 0000000..d4d69b4
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestNonTxIndexBuilder.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.covered;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.coprocessor.BaseRegionScanner;
+import org.apache.phoenix.hbase.index.MultiMutation;
+import org.apache.phoenix.hbase.index.covered.data.LocalTable;
+import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
+import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.index.PhoenixIndexMetaData;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+
+public class TestNonTxIndexBuilder extends BaseConnectionlessQueryTest {
+    private static final String TEST_TABLE_STRING = "TEST_TABLE";
+    private static final String TEST_TABLE_DDL = "CREATE TABLE IF NOT EXISTS " +
+            TEST_TABLE_STRING + " (\n" +
+        "    ORGANIZATION_ID CHAR(4) NOT NULL,\n" +
+        "    ENTITY_ID CHAR(7) NOT NULL,\n" +
+        "    SCORE INTEGER,\n" +
+        "    LAST_UPDATE_TIME TIMESTAMP\n" +
+        "    CONSTRAINT TEST_TABLE_PK PRIMARY KEY (\n" +
+        "        ORGANIZATION_ID,\n" +
+        "        ENTITY_ID\n" +
+        "    )\n" +
+        ") VERSIONS=1, MULTI_TENANT=TRUE";
+    private static final String TEST_TABLE_INDEX_STRING = "TEST_TABLE_SCORE";
+    private static final String TEST_TABLE_INDEX_DDL = "CREATE INDEX IF NOT EXISTS " +
+            TEST_TABLE_INDEX_STRING
+            + " ON " + TEST_TABLE_STRING + " (SCORE DESC, ENTITY_ID DESC)";
+    private static final byte[] ROW = Bytes.toBytes("org1entity1"); //length 4 + 7 (see ddl)
+    private static final String FAM_STRING = QueryConstants.DEFAULT_COLUMN_FAMILY;
+    private static final byte[] FAM = Bytes.toBytes(FAM_STRING);
+    private static final byte[] INDEXED_QUALIFIER = Bytes.toBytes("SCORE");
+    private static final byte[] VALUE_1 = Bytes.toBytes(111);
+    private static final byte[] VALUE_2 = Bytes.toBytes(222);
+    private static final byte[] VALUE_3 = Bytes.toBytes(333);
+    private static final byte PUT_TYPE = KeyValue.Type.Put.getCode();
+
+    private NonTxIndexBuilder indexBuilder;
+    private PhoenixIndexMetaData mockIndexMetaData;
+    // Put your current row state in here - the index builder will read from this in LocalTable
+    // to determine whether the index has changed.
+    // Whatever we return here should match the table DDL (e.g. length of column value)
+    private List<Cell> currentRowCells;
+
+    /**
+     * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
+     * called, where any read requests to
+     * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test
+     * field 'currentRowCells'
+     */
+    @Before
+    public void setup() throws Exception {
+        RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
+        Configuration conf = new Configuration(false);
+        conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
+        Mockito.when(env.getConfiguration()).thenReturn(conf);
+
+        // the following is used by LocalTable#getCurrentRowState()
+        Region mockRegion = Mockito.mock(Region.class);
+        Mockito.when(env.getRegion()).thenReturn(mockRegion);
+
+        Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class)))
+                .thenAnswer(new Answer<RegionScanner>() {
+                    @Override
+                    public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
+                        Scan sArg = (Scan) invocation.getArguments()[0];
+                        TimeRange timeRange = sArg.getTimeRange();
+                        return getMockTimeRangeRegionScanner(timeRange);
+                    }
+                });
+
+        // the following is called by PhoenixIndexCodec#getIndexUpserts() , getIndexDeletes()
+        HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
+        Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
+        Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
+        Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
+
+        mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
+        Mockito.when(mockIndexMetaData.isImmutableRows()).thenReturn(false);
+        Mockito.when(mockIndexMetaData.getIndexMaintainers())
+                .thenReturn(Collections.singletonList(getTestIndexMaintainer()));
+
+        indexBuilder = new NonTxIndexBuilder();
+        indexBuilder.setup(env);
+    }
+
+    // returns a RegionScanner which filters currentRowCells using the given TimeRange.
+    // This is called from LocalTable#getCurrentRowState()
+    // If testIndexMetaData.ignoreNewerMutations() is not set, default TimeRange is 0 to
+    // Long.MAX_VALUE
+    private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) {
+        return new BaseRegionScanner(Mockito.mock(RegionScanner.class)) {
+            @Override
+            public boolean next(List<Cell> results) throws IOException {
+                for (Cell cell : currentRowCells) {
+                    if (cell.getTimestamp() >= timeRange.getMin()
+                            && cell.getTimestamp() < timeRange.getMax()) {
+                        results.add(cell);
+                    }
+                }
+                return false; // indicate no more results
+            }
+        };
+    }
+
+    private IndexMaintainer getTestIndexMaintainer() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        // disable column encoding, makes debugging easier
+        props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0");
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        try {
+            conn.setAutoCommit(true);
+            conn.createStatement().execute(TEST_TABLE_DDL);
+            conn.createStatement().execute(TEST_TABLE_INDEX_DDL);
+            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+            PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING));
+            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+            table.getIndexMaintainers(ptr, pconn);
+            List<IndexMaintainer> indexMaintainerList =
+                    IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true);
+            assertEquals(1, indexMaintainerList.size());
+            IndexMaintainer indexMaintainer = indexMaintainerList.get(0);
+            return indexMaintainer;
+        } finally {
+            conn.close();
+        }
+    }
+
+    /**
+     * Tests that updating an indexed column results in a DeleteFamily (prior index cell) and a Put
+     * (new index cell)
+     */
+    @Test
+    public void testGetMutableIndexUpdate() throws IOException {
+        setCurrentRowState(FAM, INDEXED_QUALIFIER, 1, VALUE_1);
+
+        // update ts and value
+        Put put = new Put(ROW);
+        put.addImmutable(FAM, INDEXED_QUALIFIER, 2, VALUE_2);
+        MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
+        mutation.addAll(put);
+
+        Collection<Pair<Mutation, byte[]>> indexUpdates =
+                indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
+        assertEquals(2, indexUpdates.size());
+        assertContains(indexUpdates, 2, ROW, KeyValue.Type.DeleteFamily, FAM,
+            new byte[0] /* qual not needed */, 2);
+        assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW,
+            KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2);
+    }
+
+    /**
+     * Tests a partial rebuild of a row with multiple versions. 3 versions of the row in data table,
+     * and we rebuild the index starting from time t=2
+     */
+    @Test
+    public void testRebuildMultipleVersionRow() throws IOException {
+        // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations
+        // see LocalTable#getCurrentRowState()
+        Mockito.when(mockIndexMetaData.ignoreNewerMutations()).thenReturn(true);
+
+        // the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable
+        // should only return first
+        Cell currentCell1 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
+        Cell currentCell2 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 2, PUT_TYPE, VALUE_2);
+        Cell currentCell3 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 3, PUT_TYPE, VALUE_3);
+        setCurrentRowState(Arrays.asList(currentCell3, currentCell2, currentCell1));
+
+        // rebuilder replays mutations starting from t=2
+        MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
+        Put put = new Put(ROW);
+        put.addImmutable(FAM, INDEXED_QUALIFIER, 3, VALUE_3);
+        mutation.addAll(put);
+        put = new Put(ROW);
+        put.addImmutable(FAM, INDEXED_QUALIFIER, 2, VALUE_2);
+        mutation.addAll(put);
+
+        Collection<Pair<Mutation, byte[]>> indexUpdates =
+                indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
+        assertEquals(2, indexUpdates.size());
+        assertContains(indexUpdates, 2, ROW, KeyValue.Type.DeleteFamily, FAM,
+            new byte[0] /* qual not needed */, 2);
+        assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW,
+            KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 3);
+    }
+
+    /**
+     * Tests getting an index update for a mutation with 200 versions. Before, the issue PHOENIX-3807
+     * was causing this test to take >90 seconds, so here we set a timeout of 5 seconds
+     */
+    @Test(timeout = 5000)
+    public void testManyVersions() throws IOException {
+        // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations
+        // see LocalTable#getCurrentRowState()
+        Mockito.when(mockIndexMetaData.ignoreNewerMutations()).thenReturn(true);
+        MultiMutation mutation = getMultipleVersionMutation(200);
+        currentRowCells = mutation.getFamilyCellMap().get(FAM);
+
+        Collection<Pair<Mutation, byte[]>> indexUpdates =
+                indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
+        assertNotEquals(0, indexUpdates.size());
+    }
+
+    // Assert that the given collection of indexUpdates contains the given cell
+    private void assertContains(Collection<Pair<Mutation, byte[]>> indexUpdates,
+            final long mutationTs, final byte[] row, final Type cellType, final byte[] fam,
+            final byte[] qual, final long cellTs) {
+        Predicate<Pair<Mutation, byte[]>> hasCellPredicate =
+                new Predicate<Pair<Mutation, byte[]>>() {
+                    @Override
+                    public boolean apply(Pair<Mutation, byte[]> input) {
+                        assertEquals(TEST_TABLE_INDEX_STRING, Bytes.toString(input.getSecond()));
+                        Mutation mutation = input.getFirst();
+                        if (mutationTs == mutation.getTimeStamp()) {
+                            NavigableMap<byte[], List<Cell>> familyCellMap =
+                                    mutation.getFamilyCellMap();
+                            Cell updateCell = familyCellMap.get(fam).get(0);
+                            if (cellType == KeyValue.Type.codeToType(updateCell.getTypeByte())
+                                    && Bytes.compareTo(fam, CellUtil.cloneFamily(updateCell)) == 0
+                                    && Bytes.compareTo(qual,
+                                        CellUtil.cloneQualifier(updateCell)) == 0
+                                    && cellTs == updateCell.getTimestamp()) {
+                                return true;
+                            }
+                        }
+                        return false;
+                    }
+                };
+        Optional<Pair<Mutation, byte[]>> tryFind =
+                Iterables.tryFind(indexUpdates, hasCellPredicate);
+        assertTrue(tryFind.isPresent());
+    }
+
+    private void setCurrentRowState(byte[] fam2, byte[] indexedQualifier, int i, byte[] value1) {
+        Cell cell = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
+        currentRowCells = Collections.singletonList(cell);
+    }
+
+    private void setCurrentRowState(List<Cell> cells) {
+        currentRowCells = cells;
+    }
+
+    private MultiMutation getMultipleVersionMutation(int versions) {
+        MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
+        for (int i = versions - 1; i >= 0; i--) {
+            Put put = new Put(ROW);
+            put.addImmutable(FAM, INDEXED_QUALIFIER, i, Bytes.toBytes(i));
+            mutation.addAll(put);
+        }
+        return mutation;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/85e344fd/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestLocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestLocalTable.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestLocalTable.java
new file mode 100644
index 0000000..b11ac8d
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestLocalTable.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index.covered.data;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+public class TestLocalTable {
+    private static final byte[] ROW = Bytes.toBytes("test_row");
+
+    @Test
+    public void testGetOldestTimestamp() {
+        LocalTable localTable = new LocalTable(null);
+
+        List<Cell> cellList1 = getCellList(new KeyValue(ROW, 5L), new KeyValue(ROW, 4L));
+        assertEquals(4L, localTable.getOldestTimestamp(Collections.singletonList(cellList1)));
+
+        List<Cell> cellList2 = getCellList(new KeyValue(ROW, 5L), new KeyValue(ROW, 2L));
+        List<List<Cell>> set1 = new ArrayList<>(Arrays.asList(cellList1, cellList2));
+        assertEquals(2L, localTable.getOldestTimestamp(set1));
+
+        List<Cell> cellList3 = getCellList(new KeyValue(ROW, 1L));
+        set1.add(cellList3);
+        assertEquals(1L, localTable.getOldestTimestamp(set1));
+
+        List<Cell> cellList4 =
+                getCellList(new KeyValue(ROW, 3L), new KeyValue(ROW, 1L), new KeyValue(ROW, 0L));
+        set1.add(cellList4);
+        assertEquals(0L, localTable.getOldestTimestamp(set1));
+    }
+
+    private List<Cell> getCellList(KeyValue... kvs) {
+        List<Cell> cellList = new ArrayList<>();
+        for (KeyValue kv : kvs) {
+            cellList.add(kv);
+        }
+        return cellList;
+    }
+}


[34/46] phoenix git commit: PHOENIX-3800 NPE when doing UPSERT SELECT into salted tables

Posted by td...@apache.org.
PHOENIX-3800 NPE when doing UPSERT SELECT into salted tables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f51c0db9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f51c0db9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f51c0db9

Branch: refs/heads/omid
Commit: f51c0db9f2d2ee261e602a114d47dd63353bbba8
Parents: 5b09901
Author: Ankit Singhal <an...@gmail.com>
Authored: Fri May 5 11:38:44 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Fri May 5 11:38:44 2017 +0530

----------------------------------------------------------------------
 .../apache/phoenix/end2end/UpsertSelectIT.java  | 21 +++++++++++++++-----
 .../apache/phoenix/compile/UpsertCompiler.java  |  2 +-
 2 files changed, 17 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f51c0db9/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 0a20d47..1c04acb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -78,20 +78,31 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
     
     @Test
     public void testUpsertSelectWithNoIndex() throws Exception {
-        testUpsertSelect(false);
+        testUpsertSelect(false, false);
     }
     
     @Test
     public void testUpsertSelecWithIndex() throws Exception {
-        testUpsertSelect(true);
+        testUpsertSelect(true, false);
     }
     
-    private void testUpsertSelect(boolean createIndex) throws Exception {
+    @Test
+    public void testUpsertSelecWithIndexWithSalt() throws Exception {
+        testUpsertSelect(true, true);
+    }
+
+    @Test
+    public void testUpsertSelecWithNoIndexWithSalt() throws Exception {
+        testUpsertSelect(false, true);
+    }
+
+    private void testUpsertSelect(boolean createIndex, boolean saltTable) throws Exception {
         long ts = nextTimestamp();
         String tenantId = getOrganizationId();
-        initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), null, ts-1, getUrl(), null);
+        byte[][] splits = getDefaultSplits(tenantId);
+        initATableValues(ATABLE_NAME, tenantId, saltTable ? null : splits, null, ts-1, getUrl(), saltTable ? "salt_buckets = 2" : null);
 
-        ensureTableCreated(getUrl(), CUSTOM_ENTITY_DATA_FULL_NAME, CUSTOM_ENTITY_DATA_FULL_NAME, ts-1);
+        ensureTableCreated(getUrl(), CUSTOM_ENTITY_DATA_FULL_NAME, CUSTOM_ENTITY_DATA_FULL_NAME, null, ts-1, saltTable ? "salt_buckets = 2" : null);
         String indexName = "IDX1";
         if (createIndex) {
             Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f51c0db9/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 2304d83..5559ad7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -677,7 +677,7 @@ public class UpsertCompiler {
                     for (int i = 0 ; i < projectedExpressions.size(); i++) {
                         // Must make new column if position has changed
                         PColumn column = allColumns.get(allColumnsIndexes[i]);
-                        projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i));
+                        projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff));
                     }
                     // Build table from projectedColumns
                     // Hack to add default column family to be used on server in case no value column is projected.


[20/46] phoenix git commit: PHOENIX-3781 Backward compatibility issue with old client and 4.10 server

Posted by td...@apache.org.
PHOENIX-3781 Backward compatibility issue with old client and 4.10 server


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/adf56068
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/adf56068
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/adf56068

Branch: refs/heads/omid
Commit: adf5606835f954a4e6b62d2fde8777b15cfec434
Parents: cd444d9
Author: Samarth <sa...@salesforce.com>
Authored: Wed Apr 12 10:45:38 2017 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Wed Apr 12 10:45:38 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/schema/PTableImpl.java   | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/adf56068/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index d91ebcb..51f5b0b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -480,8 +480,9 @@ public class PTableImpl implements PTable {
         this.isNamespaceMapped = isNamespaceMapped;
         this.autoPartitionSeqName = autoPartitionSeqName;
         this.isAppendOnlySchema = isAppendOnlySchema;
-        this.immutableStorageScheme = storageScheme;
-        this.qualifierEncodingScheme = qualifierEncodingScheme;
+        // null check for backward compatibility and sanity. If any of the two below is null, then it means the table is a non-encoded table.
+        this.immutableStorageScheme = storageScheme == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : storageScheme;
+        this.qualifierEncodingScheme = qualifierEncodingScheme == null ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : qualifierEncodingScheme;
         List<PColumn> pkColumns;
         PColumn[] allColumns;
         
@@ -586,7 +587,7 @@ public class PTableImpl implements PTable {
                 .orderedBy(Bytes.BYTES_COMPARATOR);
         for (int i = 0; i < families.length; i++) {
             Map.Entry<PName,List<PColumn>> entry = iterator.next();
-            PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue());//, qualifierEncodingScheme);
+            PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue());
             families[i] = family;
             familyByString.put(family.getName().getString(), family);
             familyByBytes.put(family.getName().getBytes(), family);
@@ -896,7 +897,7 @@ public class PTableImpl implements PTable {
                 mutations.add(deleteRow);
             } else {
                 // store all columns for a given column family in a single cell instead of one column per cell in order to improve write performance
-                if (immutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
+                if (immutableStorageScheme != null && immutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                     Put put = new Put(this.key);
                     if (isWALDisabled()) {
                         put.setDurability(Durability.SKIP_WAL);
@@ -1300,11 +1301,13 @@ public class PTableImpl implements PTable {
         if (table.hasIsAppendOnlySchema()) {
             isAppendOnlySchema = table.getIsAppendOnlySchema();
         }
-        ImmutableStorageScheme storageScheme = null;
+        // For backward compatibility. Clients older than 4.10 will always have non-encoded immutable tables.
+        ImmutableStorageScheme storageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
         if (table.hasStorageScheme()) {
             storageScheme = ImmutableStorageScheme.fromSerializedValue(table.getStorageScheme().toByteArray()[0]);
         }
-        QualifierEncodingScheme qualifierEncodingScheme = null;
+        // For backward compatibility. Clients older than 4.10 will always have non-encoded qualifiers.
+        QualifierEncodingScheme qualifierEncodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
         if (table.hasEncodingScheme()) {
             qualifierEncodingScheme = QualifierEncodingScheme.fromSerializedValue(table.getEncodingScheme().toByteArray()[0]);
         }


[29/46] phoenix git commit: PHOENIX-3751 spark 2.1 with Phoenix 4.10 load data as dataframe fail, NullPointerException

Posted by td...@apache.org.
PHOENIX-3751 spark 2.1 with Phoenix 4.10 load data as dataframe fail, NullPointerException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/28af89c4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/28af89c4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/28af89c4

Branch: refs/heads/omid
Commit: 28af89c46fa54d7f60adc8be88fdf559cad811d2
Parents: 679ff21
Author: Ankit Singhal <an...@gmail.com>
Authored: Fri Apr 21 11:47:27 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Fri Apr 21 11:47:27 2017 +0530

----------------------------------------------------------------------
 phoenix-spark/src/it/resources/globalSetup.sql                   | 2 +-
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/28af89c4/phoenix-spark/src/it/resources/globalSetup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/globalSetup.sql b/phoenix-spark/src/it/resources/globalSetup.sql
index 28eb0f7..dc24da7 100644
--- a/phoenix-spark/src/it/resources/globalSetup.sql
+++ b/phoenix-spark/src/it/resources/globalSetup.sql
@@ -60,4 +60,4 @@ UPSERT INTO "small" VALUES ('key3', 'xyz', 30000)
 CREATE TABLE MULTITENANT_TEST_TABLE (TENANT_ID VARCHAR NOT NULL, ORGANIZATION_ID VARCHAR, GLOBAL_COL1 VARCHAR  CONSTRAINT pk PRIMARY KEY (TENANT_ID, ORGANIZATION_ID)) MULTI_TENANT=true
 CREATE TABLE IF NOT EXISTS GIGANTIC_TABLE (ID INTEGER PRIMARY KEY,unsig_id UNSIGNED_INT,big_id BIGINT,unsig_long_id UNSIGNED_LONG,tiny_id TINYINT,unsig_tiny_id UNSIGNED_TINYINT,small_id SMALLINT,unsig_small_id UNSIGNED_SMALLINT,float_id FLOAT,unsig_float_id UNSIGNED_FLOAT,double_id DOUBLE,unsig_double_id UNSIGNED_DOUBLE,decimal_id DECIMAL,boolean_id BOOLEAN,time_id TIME,date_id DATE,timestamp_id TIMESTAMP,unsig_time_id UNSIGNED_TIME,unsig_date_id UNSIGNED_DATE,unsig_timestamp_id UNSIGNED_TIMESTAMP,varchar_id VARCHAR (30),char_id CHAR (30),binary_id BINARY (100),varbinary_id VARBINARY (100))
  CREATE TABLE IF NOT EXISTS OUTPUT_GIGANTIC_TABLE (ID INTEGER PRIMARY KEY,unsig_id UNSIGNED_INT,big_id BIGINT,unsig_long_id UNSIGNED_LONG,tiny_id TINYINT,unsig_tiny_id UNSIGNED_TINYINT,small_id SMALLINT,unsig_small_id UNSIGNED_SMALLINT,float_id FLOAT,unsig_float_id UNSIGNED_FLOAT,double_id DOUBLE,unsig_double_id UNSIGNED_DOUBLE,decimal_id DECIMAL,boolean_id BOOLEAN,time_id TIME,date_id DATE,timestamp_id TIMESTAMP,unsig_time_id UNSIGNED_TIME,unsig_date_id UNSIGNED_DATE,unsig_timestamp_id UNSIGNED_TIMESTAMP,varchar_id VARCHAR (30),char_id CHAR (30),binary_id BINARY (100),varbinary_id VARBINARY (100))
- upsert into GIGANTIC_TABLE values(0,2,3,4,-5,6,7,8,9.3,10.4,11.5,12.6,13.7,true,CURRENT_TIME(),CURRENT_DATE(),CURRENT_TIME(),CURRENT_TIME(),CURRENT_DATE(),CURRENT_TIME(),'This is random textA','a','a','a')
+ upsert into GIGANTIC_TABLE values(0,2,3,4,-5,6,7,8,9.3,10.4,11.5,12.6,13.7,true,null,null,CURRENT_TIME(),CURRENT_TIME(),CURRENT_DATE(),CURRENT_TIME(),'This is random textA','a','a','a')

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28af89c4/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 63547d2..2c2c6e1 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -134,9 +134,9 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
       val rowSeq = columns.map { case (name, sqlType) =>
         val res = pr.resultMap(name)
           // Special handling for data types
-          if (dateAsTimestamp && (sqlType == 91 || sqlType == 19)) { // 91 is the defined type for Date and 19 for UNSIGNED_DATE
+          if (dateAsTimestamp && (sqlType == 91 || sqlType == 19) && res!=null) { // 91 is the defined type for Date and 19 for UNSIGNED_DATE
             new java.sql.Timestamp(res.asInstanceOf[java.sql.Date].getTime)
-          } else if (sqlType == 92 || sqlType == 18) { // 92 is the defined type for Time and 18 for UNSIGNED_TIME
+          } else if ((sqlType == 92 || sqlType == 18) && res!=null) { // 92 is the defined type for Time and 18 for UNSIGNED_TIME
             new java.sql.Timestamp(res.asInstanceOf[java.sql.Time].getTime)
           } else {
             res


[17/46] phoenix git commit: PHOENIX-3770 Double quote SYSTEM, USER, DATE keywords in some more IT tests (Rajeshbabu)

Posted by td...@apache.org.
PHOENIX-3770 Double quote SYSTEM, USER, DATE keywords in some more IT tests (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/92e728e0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/92e728e0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/92e728e0

Branch: refs/heads/omid
Commit: 92e728e09ace5dfac93cd04a747f3db8043569ee
Parents: 8b3cc71
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 6 18:47:50 2017 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Apr 6 18:47:50 2017 +0530

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    |  9 ++--
 .../org/apache/phoenix/end2end/ArrayIT.java     |  2 +-
 .../phoenix/end2end/AutomaticRebuildIT.java     |  4 +-
 .../phoenix/end2end/CoalesceFunctionIT.java     | 12 +++---
 .../apache/phoenix/end2end/CreateTableIT.java   |  2 +-
 .../end2end/IndexToolForPartialBuildIT.java     |  4 +-
 .../end2end/SequenceBulkAllocationIT.java       | 44 ++++++++++----------
 .../org/apache/phoenix/end2end/SequenceIT.java  | 44 ++++++++++----------
 .../phoenix/end2end/SpillableGroupByIT.java     |  2 +-
 .../phoenix/end2end/StatsCollectorIT.java       | 11 ++---
 .../phoenix/end2end/StoreNullsPropIT.java       |  2 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  | 18 ++++----
 .../apache/phoenix/end2end/index/IndexIT.java   |  5 +--
 .../phoenix/end2end/index/IndexMetadataIT.java  |  6 +--
 .../phoenix/end2end/index/IndexTestUtil.java    |  2 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |  2 +-
 .../org/apache/phoenix/rpc/UpdateCacheIT.java   |  2 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  |  8 ++--
 .../org/apache/phoenix/util/UpgradeUtil.java    |  2 +-
 19 files changed, 92 insertions(+), 89 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index d13b8d2..5c1374f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -759,8 +759,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
             try {
                 String ddl = "CREATE TABLE " + dataTableFullName + " " + "  (a_string varchar not null, col1 integer, cf1.col2 integer"
                         + "  CONSTRAINT pk PRIMARY KEY (a_string)) " +  tableDDLOptions;
-                stmt = conn.prepareStatement(ddl);
-                stmt.execute();
+                conn.createStatement().execute(ddl);
             } finally {
                 closeStatement(stmt);
             }
@@ -2476,7 +2475,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
     }
 	
 	private void assertEncodedCQValue(String columnFamily, String columnName, String schemaName, String tableName, int expectedValue) throws Exception {
-        String query = "SELECT " + COLUMN_QUALIFIER + " FROM SYSTEM.CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
+        String query = "SELECT " + COLUMN_QUALIFIER + " FROM \"SYSTEM\".CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
                 + " = ? " + " AND " + COLUMN_FAMILY + " = ?" + " AND " + COLUMN_NAME  + " = ?" + " AND " + COLUMN_QUALIFIER  + " IS NOT NULL";
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             PreparedStatement stmt = conn.prepareStatement(query);
@@ -2496,7 +2495,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
     }
     
     private void assertEncodedCQCounter(String columnFamily, String schemaName, String tableName, int expectedValue) throws Exception {
-        String query = "SELECT " + COLUMN_QUALIFIER_COUNTER + " FROM SYSTEM.CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
+        String query = "SELECT " + COLUMN_QUALIFIER_COUNTER + " FROM \"SYSTEM\".CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
                 + " = ? " + " AND " + COLUMN_FAMILY + " = ? AND " + COLUMN_QUALIFIER_COUNTER + " IS NOT NULL";
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             PreparedStatement stmt = conn.prepareStatement(query);
@@ -2515,7 +2514,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
     }
     
     private void assertSequenceNumber(String schemaName, String tableName, long expectedSequenceNumber) throws Exception {
-        String query = "SELECT " + TABLE_SEQ_NUM + " FROM SYSTEM.CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
+        String query = "SELECT " + TABLE_SEQ_NUM + " FROM \"SYSTEM\".CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
                 + " = ? AND " +  TABLE_SEQ_NUM + " IS NOT NULL AND " + COLUMN_NAME + " IS NULL AND "
                 + COLUMN_FAMILY + " IS NULL ";
         try (Connection conn = DriverManager.getConnection(getUrl())) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
index 77af84c..26fba02 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
@@ -1773,7 +1773,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
         conn = DriverManager.getConnection(getUrl(), props);
         try {
-            PreparedStatement stmt = conn.prepareStatement("select ?[2] from system.\"catalog\" limit 1");
+            PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"catalog\" limit 1");
             Array array = conn.createArrayOf("CHAR", new String[] {"a","b","c"});
             stmt.setArray(1, array);
             ResultSet rs = stmt.executeQuery();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutomaticRebuildIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutomaticRebuildIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutomaticRebuildIT.java
index cbb7745..25cab35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutomaticRebuildIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AutomaticRebuildIT.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -156,7 +158,7 @@ public class AutomaticRebuildIT extends BaseOwnClusterIT {
 				rs = conn.createStatement()
 						.executeQuery(String.format("SELECT " + PhoenixDatabaseMetaData.INDEX_STATE + ","
 								+ PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " FROM "
-								+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " ("
+								+"\""+ SYSTEM_CATALOG_SCHEMA + "\"." + SYSTEM_CATALOG_TABLE + " ("
 								+ PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " bigint) where "
 								+ PhoenixDatabaseMetaData.TABLE_SCHEM + "='" + schemaName + "' and "
 								+ PhoenixDatabaseMetaData.TABLE_NAME + "='" + indxTable + "'"));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
index 828051f..c6d7db9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
@@ -176,7 +176,7 @@ public class CoalesceFunctionIT extends ParallelStatsDisabledIT {
         //second param to coalesce is signed int
         ResultSet rs = conn.createStatement().executeQuery(
                 "SELECT "
-                + " COALESCE(NTH_VALUE(\"COUNT\", 100) WITHIN GROUP (ORDER BY COUNT DESC), 1) "
+                + " COALESCE(NTH_VALUE(\"COUNT\", 100) WITHIN GROUP (ORDER BY \"COUNT\" DESC), 1) "
                 + " FROM " + tableName
                 + " GROUP BY ID");
 
@@ -192,14 +192,14 @@ public class CoalesceFunctionIT extends ParallelStatsDisabledIT {
 
         String ddl = "CREATE TABLE " + tableName + "("
                 + "    ID BIGINT NOT NULL, "
-                + "    DATE TIMESTAMP NOT NULL, "
+                + "    \"DATE\" TIMESTAMP NOT NULL, "
                 + "    \"COUNT\" BIGINT "
-                + "    CONSTRAINT pk PRIMARY KEY(ID, DATE))";
+                + "    CONSTRAINT pk PRIMARY KEY(ID, \"DATE\"))";
         conn.createStatement().execute(ddl);
 
-        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, DATE, \"COUNT\") VALUES(1, CURRENT_TIME(), 1)");
-        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, DATE, \"COUNT\") VALUES(1, CURRENT_TIME(), 2)");
-        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, DATE, \"COUNT\") VALUES(2, CURRENT_TIME(), 1)");
+        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, \"DATE\", \"COUNT\") VALUES(1, CURRENT_TIME(), 1)");
+        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, \"DATE\", \"COUNT\") VALUES(1, CURRENT_TIME(), 2)");
+        conn.createStatement().execute("UPSERT INTO " + tableName + "(ID, \"DATE\", \"COUNT\") VALUES(2, CURRENT_TIME(), 1)");
         conn.commit();
 
         //second param to coalesce is signed int

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index cec95f4..96ba71d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -98,7 +98,7 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
                 "                data.stat_minute VARCHAR ,\n" + 
                 "                data.state VARCHAR ,\n" + 
                 "                data.title VARCHAR ,\n" + 
-                "                data.user VARCHAR ,\n" + 
+                "                data.\"user\" VARCHAR ,\n" + 
                 "                data.inrow VARCHAR ,\n" + 
                 "                data.jobid VARCHAR ,\n" + 
                 "                data.jobtype VARCHAR ,\n" + 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
index 116c47f..599e601 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -163,7 +165,7 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
 			rs = conn.createStatement()
 					.executeQuery(String.format("SELECT " + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + ","
 							+ PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " FROM "
-							+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " ("
+							+"\""+ SYSTEM_CATALOG_SCHEMA + "\"." + SYSTEM_CATALOG_TABLE + " ("
 							+ PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " bigint) where "
 							+ PhoenixDatabaseMetaData.TABLE_SCHEM + "='" + schemaName + "' and "
 							+ PhoenixDatabaseMetaData.TABLE_NAME + "='" + indxTable + "'"));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceBulkAllocationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceBulkAllocationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceBulkAllocationIT.java
index 7ea3477..c057f49 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceBulkAllocationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceBulkAllocationIT.java
@@ -61,9 +61,9 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
 
     private static final long BATCH_SIZE = 3;
     private static final String SELECT_NEXT_VALUE_SQL =
-            "SELECT NEXT VALUE FOR %s FROM SYSTEM.\"SEQUENCE\" LIMIT 1";
+            "SELECT NEXT VALUE FOR %s FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1";
     private static final String SELECT_CURRENT_VALUE_SQL =
-            "SELECT CURRENT VALUE FOR %s FROM SYSTEM.\"SEQUENCE\" LIMIT 1";
+            "SELECT CURRENT VALUE FOR %s FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1";
     private static final String CREATE_SEQUENCE_NO_MIN_MAX_TEMPLATE =
             "CREATE SEQUENCE bulkalloc.alpha START WITH %s INCREMENT BY %s CACHE %s";
     private static final String CREATE_SEQUENCE_WITH_MIN_MAX_TEMPLATE =
@@ -107,7 +107,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         nextConnection();
         try {
             conn.createStatement().executeQuery(
-                "SELECT NEXT NULL VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                "SELECT NEXT NULL VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("null is not allowed to be used for <n> in NEXT <n> VALUES FOR <seq>");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT.getErrorCode(),
@@ -121,7 +121,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         nextConnection();    
         try {
             conn.createStatement().executeQuery(
-                "SELECT NEXT '89b' VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                "SELECT NEXT '89b' VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("Only integers and longs are allowed to be used for <n> in NEXT <n> VALUES FOR <seq>");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT.getErrorCode(),
@@ -136,7 +136,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         nextConnection();
         try {
             conn.createStatement().executeQuery(
-                "SELECT NEXT '-1' VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                "SELECT NEXT '-1' VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("null is not allowed to be used for <n> in NEXT <n> VALUES FOR <seq>");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT.getErrorCode(),
@@ -150,7 +150,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         nextConnection();    
         try {
             conn.createStatement().executeQuery(
-                "SELECT NEXT 0 VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                "SELECT NEXT 0 VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("Only integers and longs are allowed to be used for <n> in NEXT <n> VALUES FOR <seq>");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT.getErrorCode(),
@@ -415,7 +415,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         try {
             conn.createStatement().executeQuery(
                         "SELECT NEXT " + sequenceProps.numAllocated
-                                + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                                + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("Invoking SELECT NEXT VALUES should have thrown Reached Max Value Exception");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE.getErrorCode(),
@@ -453,7 +453,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         try {
             conn.createStatement().executeQuery(
                         "SELECT NEXT " + sequenceProps.numAllocated
-                                + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                                + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("Invoking SELECT NEXT VALUES should have thrown Reached Max Value Exception");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE.getErrorCode(),
@@ -543,7 +543,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         try {
             conn.createStatement().executeQuery(
                 "SELECT NEXT " + Long.MAX_VALUE
-                        + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                        + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE.getErrorCode(),
                 e.getErrorCode());
@@ -644,7 +644,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         try {
              conn.createStatement().executeQuery(
                         "SELECT NEXT " + sequenceProps.numAllocated
-                                + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\" LIMIT 1");
+                                + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\" LIMIT 1");
             fail("Invoking SELECT NEXT VALUES should have failed as operation is not supported for sequences with Cycles.");
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED.getErrorCode(),
@@ -682,7 +682,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         int startValueAfterAllocation = 101;
         ResultSet rs =
                 conn.createStatement().executeQuery(
-                    "SELECT CURRENT VALUE FOR bulkalloc.alpha, NEXT " + props.numAllocated + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                    "SELECT CURRENT VALUE FOR bulkalloc.alpha, NEXT " + props.numAllocated + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertBulkAllocationSucceeded(props, currentValueAfterAllocation, startValueAfterAllocation);
         int currentValueFor = rs.getInt(1);
@@ -720,7 +720,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         int startValueAfterAllocation = 101;
         ResultSet rs =
                 conn.createStatement().executeQuery(
-                    "SELECT NEXT 5 VALUES FOR bulkalloc.alpha, NEXT " + props.numAllocated + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                    "SELECT NEXT 5 VALUES FOR bulkalloc.alpha, NEXT " + props.numAllocated + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         int firstValue = rs.getInt(1);
         int secondValue = rs.getInt(2);
@@ -761,7 +761,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                     + "NEXT " + props.numAllocated + " VALUES FOR bulkalloc.alpha, "
                     + "CURRENT VALUE FOR bulkalloc.alpha, "
                     + "NEXT 999 VALUES FOR bulkalloc.alpha "
-                    + "FROM SYSTEM.\"SEQUENCE\"");
+                    + "FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertBulkAllocationSucceeded(props, currentValueAfterAllocation, startValueAfterAllocation);
         
@@ -801,7 +801,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         // Bulk Allocate Sequence Slots for Two Sequences
         ResultSet rs =
                 conn.createStatement().executeQuery(
-                    "SELECT NEXT 100 VALUES FOR bulkalloc.alpha, NEXT 1000 VALUES FOR bulkalloc.beta FROM SYSTEM.\"SEQUENCE\"");
+                    "SELECT NEXT 100 VALUES FOR bulkalloc.alpha, NEXT 1000 VALUES FOR bulkalloc.beta FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(30, rs.getInt(1));
         assertEquals(100, rs.getInt(2));
@@ -919,7 +919,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                 public Long call() throws Exception {
                     ResultSet rs =
                             conn.createStatement().executeQuery(
-                                "SELECT NEXT " + numSlotToAllocate + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                                "SELECT NEXT " + numSlotToAllocate + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
                     latch1.countDown(); // Allows NEXT VALUE FOR thread to proceed
                     latch2.await(); // Waits until NEXT VALUE FOR thread reads and increments currentValue
                     rs.next();
@@ -985,7 +985,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                 public Long call() throws Exception {
                     ResultSet rs =
                             conn.createStatement().executeQuery(
-                                "SELECT NEXT " + numSlotToAllocate1 + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                                "SELECT NEXT " + numSlotToAllocate1 + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
                     rs.next();
                     latch1.countDown(); // Allows other thread to proceed
                     latch2.await(); 
@@ -1001,7 +1001,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                     latch1.await(); // Wait for other thread to execut of NEXT <n> VALUES FOR expression
                     ResultSet rs =
                             conn.createStatement().executeQuery(
-                                "SELECT NEXT " + numSlotToAllocate2 + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                                "SELECT NEXT " + numSlotToAllocate2 + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
                     rs.next();
                     long retVal = rs.getLong(1);
                     latch2.countDown(); // Allow thread to completed
@@ -1054,7 +1054,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                 public Long call() throws Exception {
                     ResultSet rs =
                             conn.createStatement().executeQuery(
-                                "SELECT NEXT " + numSlotToAllocate1 + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                                "SELECT NEXT " + numSlotToAllocate1 + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
                     latch1.countDown(); // Allows other thread to proceed
                     latch2.await(); 
                     rs.next();
@@ -1070,7 +1070,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
                     latch1.await(); // Wait for other thread to execut of NEXT <n> VALUES FOR expression
                     ResultSet rs =
                             conn.createStatement().executeQuery(
-                                "SELECT NEXT " + numSlotToAllocate2 + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                                "SELECT NEXT " + numSlotToAllocate2 + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
                     rs.next();
                     long retVal = rs.getLong(1);
                     latch2.countDown(); // Allow thread to completed
@@ -1124,14 +1124,14 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
             throws SQLException {
         ResultSet rs =
                 conn.createStatement().executeQuery(
-                    "SELECT NEXT " + numSlotToAllocate + " VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+                    "SELECT NEXT " + numSlotToAllocate + " VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(expectedValue, rs.getInt(1));
     }
 
     private void reserveSlotsInBulkUsingBindsAndAssertValue(int expectedValue, long numSlotToAllocate)
             throws SQLException {
-        PreparedStatement ps = conn.prepareStatement("SELECT NEXT ? VALUES FOR bulkalloc.alpha FROM SYSTEM.\"SEQUENCE\"");
+        PreparedStatement ps = conn.prepareStatement("SELECT NEXT ? VALUES FOR bulkalloc.alpha FROM \"SYSTEM\".\"SEQUENCE\"");
         ps.setLong(1, numSlotToAllocate);
         ResultSet rs = ps.executeQuery();
         assertTrue(rs.next());
@@ -1195,7 +1195,7 @@ public class SequenceBulkAllocationIT extends BaseClientManagedTimeIT {
         ResultSet rs =
                 conn.createStatement()
                         .executeQuery(
-                            "SELECT start_with, current_value, increment_by, cache_size, min_value, max_value, cycle_flag, sequence_schema, sequence_name FROM SYSTEM.\"SEQUENCE\"");
+                            "SELECT start_with, current_value, increment_by, cache_size, min_value, max_value, cycle_flag, sequence_schema, sequence_name FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(props.startsWith, rs.getLong("start_with"));
         assertEquals(props.incrementBy, rs.getLong("increment_by"));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 3ed4fd7..eec567e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -54,8 +54,8 @@ import com.google.common.collect.Lists;
 
 
 public class SequenceIT extends BaseClientManagedTimeIT {
-    private static final String NEXT_VAL_SQL = "SELECT NEXT VALUE FOR foo.bar FROM SYSTEM.\"SEQUENCE\"";
-    private static final String SELECT_NEXT_VALUE_SQL = "SELECT NEXT VALUE FOR %s FROM SYSTEM.\"SEQUENCE\"";
+    private static final String NEXT_VAL_SQL = "SELECT NEXT VALUE FOR foo.bar FROM \"SYSTEM\".\"SEQUENCE\"";
+    private static final String SELECT_NEXT_VALUE_SQL = "SELECT NEXT VALUE FOR %s FROM \"SYSTEM\".\"SEQUENCE\"";
     private static final long BATCH_SIZE = 3;
    
     private Connection conn;
@@ -80,7 +80,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
 	@Test
 	public void testSystemTable() throws Exception {		
 		nextConnection();
-		String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\"";
+		String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\"";
 		ResultSet rs = conn.prepareStatement(query).executeQuery();
 		assertFalse(rs.next());
 	}
@@ -102,7 +102,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
 	@Test
 	public void testSequenceNotFound() throws Exception {
         nextConnection();
-		String query = "SELECT NEXT value FOR qwert.asdf FROM SYSTEM.\"SEQUENCE\"";
+		String query = "SELECT NEXT value FOR qwert.asdf FROM \"SYSTEM\".\"SEQUENCE\"";
 		try {
 			conn.prepareStatement(query).executeQuery();
 			fail("Sequence not found");
@@ -136,7 +136,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         conn.createStatement().execute("USE " + sequenceSchemaName);
         conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " START WITH 2 INCREMENT BY 4");
         nextConnection(props);
-        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_name='"
+        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='"
                 + sequenceName + "'";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
@@ -159,7 +159,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         nextConnection();
         conn.createStatement().execute("CREATE SEQUENCE alpha.omega START WITH 2 INCREMENT BY 4");
         nextConnection();
-        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_name='OMEGA'";
+        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='OMEGA'";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals("ALPHA", rs.getString("sequence_schema"));
@@ -176,7 +176,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         conn.createStatement().execute("CREATE SEQUENCE used.nowhere START WITH 2 INCREMENT BY 4");
         nextConnection();
         try {
-            rs = conn.createStatement().executeQuery("SELECT CURRENT VALUE FOR used.nowhere FROM SYSTEM.\"SEQUENCE\"");
+            rs = conn.createStatement().executeQuery("SELECT CURRENT VALUE FOR used.nowhere FROM \"SYSTEM\".\"SEQUENCE\"");
             rs.next();
             fail();
         } catch (SQLException e) {
@@ -184,10 +184,10 @@ public class SequenceIT extends BaseClientManagedTimeIT {
             assertTrue(e.getNextException()==null);
         }
         
-        rs = conn.createStatement().executeQuery("SELECT NEXT VALUE FOR used.nowhere FROM SYSTEM.\"SEQUENCE\"");
+        rs = conn.createStatement().executeQuery("SELECT NEXT VALUE FOR used.nowhere FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(2, rs.getInt(1));
-        rs = conn.createStatement().executeQuery("SELECT CURRENT VALUE FOR used.nowhere FROM SYSTEM.\"SEQUENCE\"");
+        rs = conn.createStatement().executeQuery("SELECT CURRENT VALUE FOR used.nowhere FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(2, rs.getInt(1));
 	}
@@ -197,7 +197,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         nextConnection();
         conn.createStatement().execute("CREATE SEQUENCE alpha.omega START WITH 2 INCREMENT BY 4");
         nextConnection();
-        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_name='OMEGA'";
+        String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='OMEGA'";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals("ALPHA", rs.getString("sequence_schema"));
@@ -208,7 +208,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
 
         conn.createStatement().execute("DROP SEQUENCE alpha.omega");
         nextConnection();
-        query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM SYSTEM.\"SEQUENCE\" WHERE sequence_name='OMEGA'";
+        query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='OMEGA'";
         rs = conn.prepareStatement(query).executeQuery();
         assertFalse(rs.next());
 
@@ -255,7 +255,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         ResultSet rs =
                 conn.createStatement()
                         .executeQuery(
-                            "SELECT start_with, current_value, increment_by, cache_size, min_value, max_value, cycle_flag, sequence_schema, sequence_name FROM SYSTEM.\"SEQUENCE\"");
+                            "SELECT start_with, current_value, increment_by, cache_size, min_value, max_value, cycle_flag, sequence_schema, sequence_name FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(2, rs.getLong("start_with"));
         assertEquals(2, rs.getInt("current_value"));
@@ -270,7 +270,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         rs =
                 conn.createStatement()
                         .executeQuery(
-                            "SELECT NEXT VALUE FOR alpha.gamma, CURRENT VALUE FOR alpha.gamma FROM SYSTEM.\"SEQUENCE\"");
+                            "SELECT NEXT VALUE FOR alpha.gamma, CURRENT VALUE FOR alpha.gamma FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(2, rs.getLong(1));
         assertEquals(2, rs.getLong(2));
@@ -278,7 +278,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         rs =
                 conn.createStatement()
                         .executeQuery(
-                            "SELECT CURRENT VALUE FOR alpha.gamma, NEXT VALUE FOR alpha.gamma FROM SYSTEM.\"SEQUENCE\"");
+                            "SELECT CURRENT VALUE FOR alpha.gamma, NEXT VALUE FOR alpha.gamma FROM \"SYSTEM\".\"SEQUENCE\"");
         assertTrue(rs.next());
         assertEquals(5, rs.getLong(1));
         assertEquals(5, rs.getLong(2));
@@ -290,7 +290,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         nextConnection();
         conn.createStatement().execute("CREATE SEQUENCE alpha.zeta START WITH 4 INCREMENT BY 7");
         nextConnection();
-        String query = "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.zeta FROM SYSTEM.\"SEQUENCE\"";
+        String query = "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.zeta FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(4, rs.getInt(1));
@@ -305,7 +305,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         conn.createStatement().execute("CREATE SEQUENCE alpha.zeta START WITH 4 INCREMENT BY 7");
         conn.createStatement().execute("CREATE SEQUENCE alpha.kappa START WITH 9 INCREMENT BY 2");
         nextConnection();
-        String query = "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM SYSTEM.\"SEQUENCE\"";
+        String query = "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(4, rs.getInt(1));
@@ -337,7 +337,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
             "CREATE SEQUENCE alpha.kappa START WITH 9 INCREMENT BY -2 MINVALUE 5");
         nextConnection();
         String query =
-                "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM SYSTEM.\"SEQUENCE\"";
+                "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(4, rs.getInt(1));
@@ -377,7 +377,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
             "CREATE SEQUENCE alpha.kappa START WITH 9 INCREMENT BY -2 MINVALUE 5 MAXVALUE 9 CYCLE");
         nextConnection();
         String query =
-                "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM SYSTEM.\"SEQUENCE\"";
+                "SELECT NEXT VALUE FOR alpha.zeta, NEXT VALUE FOR alpha.kappa FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(4, rs.getInt(1));
@@ -702,7 +702,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         		"CLIENT RESERVE VALUES FROM 1 SEQUENCE", QueryUtil.getExplainPlan(rs));
         
         nextConnection();
-        rs = conn.createStatement().executeQuery("SELECT sequence_name, current_value FROM SYSTEM.\"SEQUENCE\" WHERE sequence_name='BAR'");
+        rs = conn.createStatement().executeQuery("SELECT sequence_name, current_value FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='BAR'");
         assertTrue(rs.next());
         assertEquals("BAR", rs.getString(1));
         assertEquals(1, rs.getInt(2));
@@ -724,7 +724,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         nextConnection();
         conn.createStatement().execute("CREATE SEQUENCE foo.bar START WITH 3 INCREMENT BY 2");
         nextConnection();
-        String query = "SELECT LPAD(ENCODE(NEXT VALUE FOR foo.bar,'base62'),5,'0') FROM SYSTEM.\"SEQUENCE\"";
+        String query = "SELECT LPAD(ENCODE(NEXT VALUE FOR foo.bar,'base62'),5,'0') FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals("00003", rs.getString(1));
@@ -1356,7 +1356,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         nextConnection();
         conn.createStatement().execute("CREATE SEQUENCE alpha.zeta START WITH 3 INCREMENT BY 2 CACHE 5");
         nextConnection();
-        String query = "SELECT NEXT VALUE FOR alpha.zeta FROM SYSTEM.\"SEQUENCE\"";
+        String query = "SELECT NEXT VALUE FOR alpha.zeta FROM \"SYSTEM\".\"SEQUENCE\"";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(3, rs.getInt(1));
@@ -1368,7 +1368,7 @@ public class SequenceIT extends BaseClientManagedTimeIT {
         conn.close();
         
         // verify that calling close() does not return sequence values back to the server
-        query = "SELECT CURRENT_VALUE FROM SYSTEM.\"SEQUENCE\" WHERE SEQUENCE_SCHEMA='ALPHA' AND SEQUENCE_NAME='ZETA'";
+        query = "SELECT CURRENT_VALUE FROM \"SYSTEM\".\"SEQUENCE\" WHERE SEQUENCE_SCHEMA='ALPHA' AND SEQUENCE_NAME='ZETA'";
         rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
         assertEquals(13, rs.getInt(1));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
index a11f808..dc04b53 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
@@ -161,7 +161,7 @@ public class SpillableGroupByIT extends BaseOwnClusterIT {
         stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
         conn.commit();
         stmt.execute("UPDATE STATISTICS T1");
-        ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
+        ResultSet rs = stmt.executeQuery("SELECT * FROM \"SYSTEM\".STATS");
         assertFalse(rs.next());
         rs.close();
         stmt.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 040c92c..65f0089 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.getAllSplits;
 import static org.junit.Assert.assertEquals;
@@ -260,8 +262,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         conn.createStatement().execute("upsert into " + fullTableName + " values ('abc',1,3)");
         conn.createStatement().execute("upsert into " + fullTableName + " values ('def',2,4)");
         conn.commit();
-        stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
-        stmt.execute();
+        conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
         rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName + " order by k desc");
         assertTrue(rs.next());
         assertEquals("def", rs.getString(1));
@@ -503,7 +504,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         keyRanges = getAllSplits(conn, tableName);
         assertEquals(nRows/2+1, keyRanges.size());
         ResultSet rs = conn.createStatement().executeQuery("SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM "
-                + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
+                + "\""+ SYSTEM_CATALOG_SCHEMA + "\".\"" + SYSTEM_STATS_TABLE + "\"" + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
         rs.next();
         assertEquals(nRows - nDeletedRows, rs.getLong(1));
     }
@@ -560,7 +561,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         rs = conn
                 .createStatement()
                 .executeQuery(
-                        "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from SYSTEM.STATS where PHYSICAL_NAME = '"
+                        "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
                                 + physicalTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
 
         assertTrue(rs.next());
@@ -632,7 +633,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
             long c2Bytes = rows * (columnEncoded ? ( mutable ? 37 : 48 ) : 35);
             String physicalTableName = SchemaUtil.getPhysicalHBaseTableName(fullTableName, userTableNamespaceMapped, PTableType.TABLE).getString();
             rs = conn.createStatement().executeQuery(
-                    "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from SYSTEM.STATS where PHYSICAL_NAME = '"
+                    "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
                             + physicalTableName + "' AND GUIDE_POST_KEY>= cast('" + strings[startIndex]
                             + "' as varbinary) AND  GUIDE_POST_KEY<cast('" + strings[endIndex]
                             + "' as varbinary) and COLUMN_FAMILY='C2' group by COLUMN_FAMILY");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsPropIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsPropIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsPropIT.java
index 26ff629..8c2c944 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsPropIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsPropIT.java
@@ -42,7 +42,7 @@ public class StoreNullsPropIT extends ParallelStatsDisabledIT {
                 "id smallint primary key," +
                 "name varchar)");
 
-        ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM SYSTEM.CATALOG " +
+        ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM \"SYSTEM\".CATALOG " +
                 "WHERE table_name = 'WITH_NULLS_DEFAULT' AND store_nulls is not null");
         assertTrue(rs.next());
         assertTrue(rs.getBoolean(1));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index d36e0fe..0a20d47 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -216,7 +216,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 1)); // Execute at timestamp 1
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(false);
-        String upsert = "UPSERT INTO " + PTSDB_NAME + "(date, val, host) " +
+        String upsert = "UPSERT INTO " + PTSDB_NAME + "(\"DATE\", val, host) " +
             "SELECT current_date(), x_integer+2, entity_id FROM ATABLE WHERE a_integer >= ?";
         PreparedStatement upsertStmt = conn.prepareStatement(upsert);
         upsertStmt.setInt(1, 6);
@@ -225,7 +225,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         conn.commit();
         conn.close();
         
-        String query = "SELECT inst,host,date,val FROM " + PTSDB_NAME;
+        String query = "SELECT inst,host,\"DATE\",val FROM " + PTSDB_NAME;
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
         conn = DriverManager.getConnection(getUrl(), props);
         PreparedStatement statement = conn.prepareStatement(query);
@@ -262,8 +262,8 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 3));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(true);
-        upsert = "UPSERT INTO " + PTSDB_NAME + "(date, val, inst) " +
-            "SELECT date+1, val*10, host FROM " + PTSDB_NAME;
+        upsert = "UPSERT INTO " + PTSDB_NAME + "(\"DATE\", val, inst) " +
+            "SELECT \"DATE\"+1, val*10, host FROM " + PTSDB_NAME;
         upsertStmt = conn.prepareStatement(upsert);
         rowsInserted = upsertStmt.executeUpdate();
         assertEquals(4, rowsInserted);
@@ -271,7 +271,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         conn.close();
         
         Date then = new Date(now.getTime() + QueryConstants.MILLIS_IN_DAY);
-        query = "SELECT host,inst, date,val FROM " + PTSDB_NAME + " where inst is not null";
+        query = "SELECT host,inst, \"DATE\",val FROM " + PTSDB_NAME + " where inst is not null";
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 4)); // Execute at timestamp 2
         conn = DriverManager.getConnection(getUrl(), props);
         statement = conn.prepareStatement(query);
@@ -392,7 +392,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 1)); // Execute at timestamp 1
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(autoCommit);
-        String upsert = "UPSERT INTO " + PTSDB_NAME + "(date, val, host) " +
+        String upsert = "UPSERT INTO " + PTSDB_NAME + "(\"DATE\", val, host) " +
             "SELECT current_date(), sum(a_integer), a_string FROM ATABLE GROUP BY a_string";
         PreparedStatement upsertStmt = conn.prepareStatement(upsert);
         int rowsInserted = upsertStmt.executeUpdate();
@@ -402,7 +402,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         }
         conn.close();
         
-        String query = "SELECT inst,host,date,val FROM " + PTSDB_NAME;
+        String query = "SELECT inst,host,\"DATE\",val FROM " + PTSDB_NAME;
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
         conn = DriverManager.getConnection(getUrl(), props);
         PreparedStatement statement = conn.prepareStatement(query);
@@ -431,7 +431,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 3));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(true);
-        upsert = "UPSERT INTO " + PTSDB_NAME + "(date, val, host, inst) " +
+        upsert = "UPSERT INTO " + PTSDB_NAME + "(\"DATE\", val, host, inst) " +
             "SELECT current_date(), max(val), max(host), 'x' FROM " + PTSDB_NAME;
         upsertStmt = conn.prepareStatement(upsert);
         rowsInserted = upsertStmt.executeUpdate();
@@ -441,7 +441,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         }
         conn.close();
         
-        query = "SELECT inst,host,date,val FROM " + PTSDB_NAME + " WHERE inst='x'";
+        query = "SELECT inst,host,\"DATE\",val FROM " + PTSDB_NAME + " WHERE inst='x'";
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 4));
         conn = DriverManager.getConnection(getUrl(), props);
         statement = conn.prepareStatement(query);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
index b76d61d..12add12 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
@@ -755,8 +755,7 @@ public class IndexIT extends ParallelStatsDisabledIT {
             assertFalse(rs.next());
 
             String ddl = "DROP INDEX " + indexName + " ON " + fullTableName;
-            stmt = conn.prepareStatement(ddl);
-            stmt.execute();
+            conn.createStatement().execute(ddl);
 
             stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, v1) VALUES(?,?)");
             stmt.setString(1, "a");
@@ -1060,7 +1059,7 @@ public class IndexIT extends ParallelStatsDisabledIT {
         try (HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin(); 
                 Connection c = DriverManager.getConnection(getUrl())) {
             ResultSet rs = c.getMetaData().getTables("", 
-                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, 
+                    "\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"", 
                     null, 
                     new String[] {PTableType.SYSTEM.toString()});
             ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
index fd6703e..e9f0194 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
@@ -548,7 +548,7 @@ public class IndexMetadataIT extends ParallelStatsDisabledIT {
         
         ResultSet rs = conn.createStatement().executeQuery(
             "select table_name, " + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " " +
-            "from system.catalog (" + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ") " +
+            "from \"SYSTEM\".catalog (" + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ") " +
             "where " + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " is not null and table_name like 'ASYNCIND_%' " +
             "order by " + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE
         );
@@ -589,7 +589,7 @@ public class IndexMetadataIT extends ParallelStatsDisabledIT {
         
         ResultSet rs = conn.createStatement().executeQuery(
             "select table_name, " + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " +
-            "from system.catalog (" + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + ") " +
+            "from \"SYSTEM\".catalog (" + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + ") " +
             "where " + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " !=0 and table_name like 'R_ASYNCIND_%' " +
             "order by table_name");
         assertTrue(rs.next());
@@ -602,7 +602,7 @@ public class IndexMetadataIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute("ALTER INDEX "+indexName+"3 ON " + testTable +" DISABLE");
         rs = conn.createStatement().executeQuery(
                 "select table_name, " + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " +
-                "from system.catalog (" + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + ") " +
+                "from \"SYSTEM\".catalog (" + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + ") " +
                 "where " + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " !=0 and table_name like 'ASYNCIND_%' " +
                 "order by table_name" );
         assertFalse(rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
index fb9776e..52af966 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
@@ -60,7 +60,7 @@ public class IndexTestUtil {
     // index table test.
     private static final String SELECT_DATA_INDEX_ROW = "SELECT " + COLUMN_FAMILY
             + " FROM "
-            + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE
+            + "\""+SYSTEM_CATALOG_SCHEMA +"\""+ ".\"" + SYSTEM_CATALOG_TABLE
             + "\" WHERE "
             + TENANT_ID + " IS NULL AND " + TABLE_SCHEM + "=? AND " + TABLE_NAME + "=? AND " + COLUMN_NAME + " IS NULL AND " + COLUMN_FAMILY + "=?";
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index f5135d9..a7d0028 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -562,7 +562,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
             String tableName = generateUniqueName();
             String indexName = generateUniqueName();
             statement.execute("create table " + tableName + " (id integer not null,fn varchar,"
-                    + "ln varchar constraint pk primary key(id)) DEFAULT_COLUMN_FAMILY='F'");
+                    + "\"ln\" varchar constraint pk primary key(id)) DEFAULT_COLUMN_FAMILY='F'");
             statement.execute("upsert into " + tableName + "  values(1,'fn','ln')");
             statement
                     .execute("create local index " + indexName + " on " + tableName + "  (fn)");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index ceb8714..ff9036f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -84,7 +84,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
 	
     @Test
     public void testUpdateCacheForNonTxnSystemTable() throws Exception {
-        String fullTableName = QueryConstants.SYSTEM_SCHEMA_NAME + QueryConstants.NAME_SEPARATOR + generateUniqueName();
+        String fullTableName = "\""+ QueryConstants.SYSTEM_SCHEMA_NAME + "\""+ QueryConstants.NAME_SEPARATOR + generateUniqueName();
         setupSystemTable(fullTableName);
         helpTestUpdateCache(fullTableName, null, new int[] {0, 0});
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index badf39b..a5c1cf4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -327,8 +327,8 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
 
         Connection conn = DriverManager.getConnection(getUrl());
         // Put table in SYSTEM schema to prevent attempts to update the cache after we disable SYSTEM.CATALOG
-        conn.createStatement().execute("CREATE TABLE SYSTEM." + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
-        conn.createStatement().execute("UPSERT INTO SYSTEM." + nonTxTableName + " VALUES (1)");
+        conn.createStatement().execute("CREATE TABLE \"SYSTEM\"." + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
+        conn.createStatement().execute("UPSERT INTO \"SYSTEM\"." + nonTxTableName + " VALUES (1)");
         conn.commit();
         // Reset empty column value to an empty value like it is pre-transactions
         HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
@@ -342,7 +342,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
             // This will succeed initially in updating the HBase metadata, but then will fail when
             // the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
             // the coprocessors back to the non transactional ones.
-            conn.createStatement().execute("ALTER TABLE SYSTEM." + nonTxTableName + " SET TRANSACTIONAL=true");
+            conn.createStatement().execute("ALTER TABLE \"SYSTEM\"." + nonTxTableName + " SET TRANSACTIONAL=true");
             fail();
         } catch (SQLException e) {
             assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
@@ -351,7 +351,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
             admin.close();
         }
         
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM SYSTEM." + nonTxTableName + " WHERE v IS NULL");
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM \"SYSTEM\"." + nonTxTableName + " WHERE v IS NULL");
         assertTrue(rs.next());
         assertEquals(1,rs.getInt(1));
         assertFalse(rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/92e728e0/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 02e4ec2..5bd3ef1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -145,7 +145,7 @@ public class UpgradeUtil {
 
     public static String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW = "SELECT "
             + "BASE_COLUMN_COUNT "
-            + "FROM SYSTEM.CATALOG "
+            + "FROM \"SYSTEM\".CATALOG "
             + "WHERE "
             + "COLUMN_NAME IS NULL "
             + "AND "


[19/46] phoenix git commit: fix some more bugs and clean code

Posted by td...@apache.org.
fix some more bugs and clean code


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f5f86341
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f5f86341
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f5f86341

Branch: refs/heads/omid
Commit: f5f86341a7eb4a721a3e924817875ed7c676e6a0
Parents: f584e5f
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Sun Apr 9 17:36:10 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Sun Apr 9 17:36:10 2017 +0300

----------------------------------------------------------------------
 .../phoenix/tx/FlappingTransactionIT.java       |   3 -
 .../org/apache/phoenix/tx/TransactionIT.java    |  16 +-
 .../org/apache/phoenix/tx/TxCheckpointIT.java   |  14 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   1 -
 .../PhoenixTransactionalProcessor.java          |   4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |   1 -
 .../UngroupedAggregateRegionObserver.java       |   6 +-
 .../apache/phoenix/execute/MutationState.java   |   3 +
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   1 -
 .../apache/phoenix/schema/MetaDataClient.java   |   4 +-
 .../transaction/OmidTransactionContext.java     |  13 ++
 .../transaction/PhoenixTransactionContext.java  |  20 +-
 .../transaction/TephraTransactionContext.java   | 224 +++++++++----------
 .../java/org/apache/phoenix/util/IndexUtil.java |   4 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   4 +-
 .../apache/phoenix/util/TransactionUtil.java    |   1 -
 16 files changed, 166 insertions(+), 153 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
index d34f403..0bc7c24 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
@@ -47,10 +47,7 @@ import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.apache.tephra.TransactionContext;
 import org.apache.tephra.TransactionSystemClient;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
 import org.junit.Test;
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 1399f6c..ff2bf6b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -53,11 +53,11 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.apache.tephra.TxConstants;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -374,21 +374,21 @@ public class TransactionIT extends ParallelStatsDisabledIT {
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+            assertEquals(1000, Integer.parseInt(colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL)));
         }
 
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX1"));
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+            assertEquals(1000, Integer.parseInt(colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL)));
         }
         
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX2"));
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+            assertEquals(1000, Integer.parseInt(colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL)));
         }
         
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "2(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
@@ -397,14 +397,14 @@ public class TransactionIT extends ParallelStatsDisabledIT {
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(10, colDesc.getMaxVersions());
             assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
-            assertEquals(null, colDesc.getValue(TxConstants.PROPERTY_TTL));
+            assertEquals(null, colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL));
         }
         conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "2 SET TTL=1000");
         desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "2"));
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(10, colDesc.getMaxVersions());
             assertEquals(1000, colDesc.getTimeToLive());
-            assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+            assertEquals(1000, Integer.parseInt(colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL)));
         }
 
         conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "3(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
@@ -434,7 +434,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
         for (HColumnDescriptor colDesc : desc.getFamilies()) {
             assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
             assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
-            assertEquals(1000, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+            assertEquals(1000, Integer.parseInt(colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL)));
         }
     }
     
@@ -466,7 +466,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
         admin.createTable(desc);
         ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
         conn.createStatement().execute(ddl);
-        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(TxConstants.READ_NON_TX_DATA));
+        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(PhoenixTransactionContext.READ_NON_TX_DATA));
         
         // Should be ok, as HBase metadata should match existing metadata.
         ddl = "CREATE TABLE IF NOT EXISTS " + t1 + " (k varchar primary key)"; 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
index 246ecd4..aac9586 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
@@ -36,9 +36,9 @@ import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
-import org.apache.tephra.Transaction.VisibilityLevel;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -254,7 +254,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 		long wp = state.getWritePointer();
 		conn.createStatement().execute(
 				"upsert into " + fullTableName + " select max(id)+1, 'a4', 'b4' from " + fullTableName + "");
-		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
+		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
 				state.getVisibilityLevel());
 		assertEquals(wp, state.getWritePointer()); // Make sure write ptr
 													// didn't move
@@ -266,7 +266,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 
 		conn.createStatement().execute(
 				"upsert into " + fullTableName + " select max(id)+1, 'a5', 'b5' from " + fullTableName + "");
-		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
+		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
 				state.getVisibilityLevel());
 		assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr
 														// moves
@@ -279,7 +279,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 		
 		conn.createStatement().execute(
 				"upsert into " + fullTableName + " select max(id)+1, 'a6', 'b6' from " + fullTableName + "");
-		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
+		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
 				state.getVisibilityLevel());
 		assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr
 														// moves
@@ -318,7 +318,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 	        state.startTransaction();
 	        long wp = state.getWritePointer();
 	        conn.createStatement().execute("delete from " + fullTableName + "1 where id1=fk1b AND fk1b=id1");
-	        assertEquals(VisibilityLevel.SNAPSHOT, state.getVisibilityLevel());
+	        assertEquals(PhoenixVisibilityLevel.SNAPSHOT, state.getVisibilityLevel());
 	        assertEquals(wp, state.getWritePointer()); // Make sure write ptr didn't move
 	
 	        rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");
@@ -336,7 +336,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 	        assertFalse(rs.next());
 	
 	        conn.createStatement().execute("delete from " + fullTableName + "1 where id1 in (select fk1a from " + fullTableName + "1 join " + fullTableName + "2 on (fk2=id1))");
-	        assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
+	        assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
 	        assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr moved
 	
 	        rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");
@@ -353,7 +353,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
             stmt.executeUpdate("upsert into " + fullTableName + "2 values (2, 4)");
 
             conn.createStatement().execute("delete from " + fullTableName + "1 where id1 in (select fk1a from " + fullTableName + "1 join " + fullTableName + "2 on (fk2=id1))");
-            assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
+            assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
             assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr moved
     
             rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 321d117..705af86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -58,7 +58,6 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.tephra.Transaction;
 
 import com.google.common.collect.ImmutableList;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTransactionalProcessor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTransactionalProcessor.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTransactionalProcessor.java
index 8693681..37fa2ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTransactionalProcessor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTransactionalProcessor.java
@@ -17,12 +17,12 @@
  */
 package org.apache.phoenix.coprocessor;
 
-import org.apache.tephra.hbase.coprocessor.TransactionProcessor;
+import org.apache.phoenix.transaction.TransactionFactory;
 
 public class PhoenixTransactionalProcessor extends DelegateRegionObserver {
 
     public PhoenixTransactionalProcessor() {
-        super(new TransactionProcessor());
+        super(TransactionFactory.getTransactionFactory().getTransactionContext().getCoProcessor());
     }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 0e0e3ba..6f7198e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -64,7 +64,6 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.tephra.Transaction;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index db3c792..9e4f39d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -108,6 +108,7 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDouble;
 import org.apache.phoenix.schema.types.PFloat;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
@@ -116,7 +117,6 @@ import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TimeKeeper;
-import org.apache.tephra.TxConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -560,7 +560,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                                 firstKV.getRowOffset(), firstKV.getRowLength(),ts);
                             mutations.add(delete);
                             // force tephra to ignore this deletes
-                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
+                            delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                         } else if (isUpsert) {
                             Arrays.fill(values, null);
                             int bucketNumOffset = 0;
@@ -624,7 +624,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                                     results.get(0).getRowLength());
                                 delete.deleteColumns(deleteCF,  deleteCQ, ts);
                                 // force tephra to ignore this deletes
-                                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
+                                delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                                 mutations.add(delete);
                             }
                         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 23c8b2a..8e26bdc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -282,6 +282,9 @@ public class MutationState implements SQLCloseable {
 
             phoenixTransactionContext.checkpoint(hasUncommittedData);
 
+            if (hasUncommittedData) {
+                uncommittedPhysicalNames.clear();
+            }
             return true;
         }
         return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index d387ab7..288277f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -113,7 +113,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.SchemaUtil;
-import org.apache.tephra.TransactionContext;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index cc2b5b9..42d1431 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -199,6 +199,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -211,7 +212,6 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TransactionUtil;
 import org.apache.phoenix.util.UpgradeUtil;
-import org.apache.tephra.TxConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1847,7 +1847,7 @@ public class MetaDataClient {
                 // If TTL set, use Tephra TTL property name instead
                 Object ttl = commonFamilyProps.remove(HColumnDescriptor.TTL);
                 if (ttl != null) {
-                    commonFamilyProps.put(TxConstants.PROPERTY_TTL, ttl);
+                    commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
                 }
             }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 8a4e284..d122d0c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -3,6 +3,7 @@ package org.apache.phoenix.transaction;
 import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.phoenix.schema.PTable;
 import org.slf4j.Logger;
 
@@ -103,4 +104,16 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
         // TODO Auto-generated method stub
         return 0;
     }
+
+    @Override
+    public boolean isPreExistingVersion(long version) {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public BaseRegionObserver getCoProcessor() {
+        // TODO Auto-generated method stub
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index bd63930..0854f4e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -1,5 +1,6 @@
 package org.apache.phoenix.transaction;
 
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.phoenix.schema.PTable;
 import org.slf4j.Logger;
 
@@ -19,7 +20,11 @@ public interface PhoenixTransactionContext {
         SNAPSHOT_ALL
       }
 
-    public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "phoenix.tx.rollback"; 
+    public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; //"phoenix.tx.rollback"; 
+
+    public static final String PROPERTY_TTL = "dataset.table.ttl";
+
+    public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing";
 
     /**
      * Starts a transaction
@@ -120,4 +125,17 @@ public interface PhoenixTransactionContext {
      * @return max transactions per second
      */
     public long getMaxTransactionsPerSecond();
+
+    /**
+     *
+     * @param version
+     * @return
+     */
+    public boolean isPreExistingVersion(long version);
+
+    /**
+     *
+     * @return the coprocessor
+     */
+    public BaseRegionObserver getCoProcessor();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index cfa3ac3..a5e6e64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -8,6 +8,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -25,7 +26,9 @@ import org.apache.tephra.TransactionManager;
 import org.apache.tephra.TransactionSystemClient;
 import org.apache.tephra.Transaction.VisibilityLevel;
 import org.apache.tephra.TxConstants;
+import org.apache.tephra.hbase.coprocessor.TransactionProcessor;
 import org.apache.tephra.inmemory.InMemoryTxSystemClient;
+import org.apache.tephra.util.TxUtils;
 import org.apache.tephra.visibility.FenceWait;
 import org.apache.tephra.visibility.VisibilityFence;
 
@@ -51,19 +54,23 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     public TephraTransactionContext(byte[] txnBytes) throws IOException {
         this();
-        this.tx = (txnBytes != null && txnBytes.length > 0) ? CODEC.decode(txnBytes) : null;
+        this.tx = (txnBytes != null && txnBytes.length > 0) ? CODEC
+                .decode(txnBytes) : null;
     }
 
     public TephraTransactionContext(PhoenixConnection connection) {
-        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
+        this.txServiceClient = connection.getQueryServices()
+                .getTransactionSystemClient();
         this.txAwares = Collections.emptyList();
         this.txContext = new TransactionContext(txServiceClient);
     }
 
-    public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean subTask) {
-        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
+    public TephraTransactionContext(PhoenixTransactionContext ctx,
+            PhoenixConnection connection, boolean subTask) {
+        this.txServiceClient = connection.getQueryServices()
+                .getTransactionSystemClient();
 
-        assert(ctx instanceof TephraTransactionContext);
+        assert (ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
 
         if (subTask) {
@@ -81,51 +88,53 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     @Override
     public void begin() throws SQLException {
         if (txContext == null) {
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build()
+                    .buildException();
         }
 
-        System.out.println("BEGIN");
         try {
             txContext.start();
         } catch (TransactionFailureException e) {
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
-            .setMessage(e.getMessage())
-            .setRootCause(e)
-            .build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.TRANSACTION_FAILED)
+                    .setMessage(e.getMessage()).setRootCause(e).build()
+                    .buildException();
         }
     }
 
     @Override
     public void commit() throws SQLException {
-        
+
         if (txContext == null || !isTransactionRunning()) {
             return;
         }
-        
+
         try {
             txContext.finish();
         } catch (TransactionFailureException e) {
             this.e = e;
+
             if (e instanceof TransactionConflictException) {
-                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
-                    .setMessage(e.getMessage())
-                    .setRootCause(e)
-                    .build().buildException();
+                throw new SQLExceptionInfo.Builder(
+                        SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
+                        .setMessage(e.getMessage()).setRootCause(e).build()
+                        .buildException();
             }
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
-                .setMessage(e.getMessage())
-                .setRootCause(e)
-                .build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.TRANSACTION_FAILED)
+                    .setMessage(e.getMessage()).setRootCause(e).build()
+                    .buildException();
         }
     }
 
     @Override
     public void abort() throws SQLException {
-        
+
         if (txContext == null || !isTransactionRunning()) {
             return;
         }
-            
+
         try {
             if (e != null) {
                 txContext.abort(e);
@@ -135,10 +144,10 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             }
         } catch (TransactionFailureException e) {
             this.e = null;
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
-                .setMessage(e.getMessage())
-                .setRootCause(e)
-                .build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.TRANSACTION_FAILED)
+                    .setMessage(e.getMessage()).setRootCause(e).build()
+                    .buildException();
         }
     }
 
@@ -148,8 +157,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             try {
                 if (txContext == null) {
                     tx = txServiceClient.checkpoint(tx);
-                }  else {
-                    assert(txContext != null);
+                } else {
+                    assert (txContext != null);
                     txContext.checkpoint();
                     tx = txContext.getCurrentTransaction();
                 }
@@ -159,44 +168,43 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         }
 
         // Since we're querying our own table while mutating it, we must exclude
-        // see our current mutations, otherwise we can get erroneous results (for DELETE)
+        // see our current mutations, otherwise we can get erroneous results
+        // (for DELETE)
         // or get into an infinite loop (for UPSERT SELECT).
         if (txContext == null) {
             tx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
+        } else {
+            assert (txContext != null);
+            txContext.getCurrentTransaction().setVisibility(
+                    VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
         }
-        else {
-            assert(txContext != null);
-            txContext.getCurrentTransaction().setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
-        }
-    }
-
-    private Transaction getCurrentTransaction() {
-        if (this.txContext != null) {
-            return this.txContext.getCurrentTransaction();
-        }
-
-        return this.tx;
     }
 
     @Override
-    public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
+    public void commitDDLFence(PTable dataTable, Logger logger)
+            throws SQLException {
         byte[] key = dataTable.getName().getBytes();
 
         try {
-            FenceWait fenceWait = VisibilityFence.prepareWait(key, txServiceClient);
+            FenceWait fenceWait = VisibilityFence.prepareWait(key,
+                    txServiceClient);
             fenceWait.await(10000, TimeUnit.MILLISECONDS);
-            
+
             if (logger.isInfoEnabled()) {
-                logger.info("Added write fence at ~" + getCurrentTransaction().getReadPointer());
+                logger.info("Added write fence at ~"
+                        + getCurrentTransaction().getReadPointer());
             }
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e)
+                    .build().buildException();
         } catch (TimeoutException | TransactionFailureException e) {
-            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
-            .setSchemaName(dataTable.getSchemaName().getString())
-            .setTableName(dataTable.getTableName().getString())
-            .build().buildException();
+            throw new SQLExceptionInfo.Builder(
+                    SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
+                    .setSchemaName(dataTable.getSchemaName().getString())
+                    .setTableName(dataTable.getTableName().getString()).build()
+                    .buildException();
         }
     }
 
@@ -213,7 +221,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
         byte[] physicalKey = table.getPhysicalName().getBytes();
         if (Bytes.compareTo(physicalKey, logicalKey) != 0) {
-            TransactionAware physicalTxAware = VisibilityFence.create(physicalKey);
+            TransactionAware physicalTxAware = VisibilityFence
+                    .create(physicalKey);
             if (this.txContext == null) {
                 this.txAwares.add(physicalTxAware);
             } else {
@@ -224,7 +233,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     @Override
     public void join(PhoenixTransactionContext ctx) {
-        assert(ctx instanceof TephraTransactionContext);
+        assert (ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraContext = (TephraTransactionContext) ctx;
 
         if (txContext != null) {
@@ -236,70 +245,51 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         }
     }
 
+    private Transaction getCurrentTransaction() {
+        return tx != null ? tx : txContext != null ? txContext.getCurrentTransaction() : null;
+    }
+
     @Override
     public boolean isTransactionRunning() {
-        if (this.txContext != null) {
-            return (this.txContext.getCurrentTransaction() != null);
-        }
-
-        if (this.tx != null) {
-            return true;
-        }
-
-        return false;
+        return getCurrentTransaction() != null;
     }
 
     @Override
     public void reset() {
         tx = null;
         txAwares.clear();
+        this.e = null;
     }
 
     @Override
     public long getTransactionId() {
-        if (this.txContext != null) {
-            return txContext.getCurrentTransaction().getTransactionId();
-        }
-
-        if (tx != null) {
-            return tx.getTransactionId();
-        }
-
-        return HConstants.LATEST_TIMESTAMP;
+        Transaction tx = getCurrentTransaction();
+        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getTransactionId(); // First write pointer - won't change with checkpointing
     }
 
     @Override
     public long getReadPointer() {
-        if (this.txContext != null) {
-            return txContext.getCurrentTransaction().getReadPointer();
-        }
+        Transaction tx = getCurrentTransaction();
 
-        if (tx != null) {
-            return tx.getReadPointer();
+        if (tx == null) {
+            return (-1);
         }
 
-        return (-1);
+        return tx.getReadPointer();
     }
 
     // For testing
     @Override
     public long getWritePointer() {
-        if (this.txContext != null) {
-            return txContext.getCurrentTransaction().getWritePointer();
-        }
-
-        if (tx != null) {
-            return tx.getWritePointer();
-        }
-
-        return HConstants.LATEST_TIMESTAMP;
+        Transaction tx = getCurrentTransaction();
+        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getWritePointer();
     }
 
     @Override
     public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
         VisibilityLevel tephraVisibilityLevel = null;
 
-        switch(visibilityLevel) {
+        switch (visibilityLevel) {
         case SNAPSHOT:
             tephraVisibilityLevel = VisibilityLevel.SNAPSHOT;
             break;
@@ -307,34 +297,29 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
             break;
         case SNAPSHOT_ALL:
+            System.out.println("OHAD Move to SNAPSHOT_ALL ");
+            System.out.flush();
             tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL;
             break;
         default:
-            assert(false);               
+            assert (false);
         }
 
-        if (this.txContext != null) {
-            txContext.getCurrentTransaction().setVisibility(tephraVisibilityLevel);
-        } else if (tx != null) {
-            tx.setVisibility(tephraVisibilityLevel);
-        } else {
-            assert(false);
-        }
+        Transaction tx = getCurrentTransaction();
+        assert(tx != null);
+        tx.setVisibility(tephraVisibilityLevel);
     }
-    
-    // For testing
+
     @Override
     public PhoenixVisibilityLevel getVisibilityLevel() {
         VisibilityLevel visibilityLevel = null;
 
-        if (this.txContext != null) {
-            visibilityLevel = txContext.getCurrentTransaction().getVisibilityLevel();
-        } else if (tx != null) {
-            visibilityLevel = tx.getVisibilityLevel();
-        }
+        Transaction tx = getCurrentTransaction();
+        assert(tx != null);
+        visibilityLevel = tx.getVisibilityLevel();
 
         PhoenixVisibilityLevel phoenixVisibilityLevel;
-        switch(visibilityLevel) {
+        switch (visibilityLevel) {
         case SNAPSHOT:
             phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT;
             break;
@@ -352,36 +337,37 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     @Override
     public byte[] encodeTransaction() throws SQLException {
-
-        Transaction transaction = null;
-
-        if (this.txContext != null) {
-            transaction = txContext.getCurrentTransaction();
-        } else if (tx != null) {
-            transaction =  tx;
-        }
-
-        assert (transaction != null);
+        Transaction tx = getCurrentTransaction();
+        assert (tx != null);
 
         try {
-            return CODEC.encode(transaction);
+            return CODEC.encode(tx);
         } catch (IOException e) {
             throw new SQLException(e);
         }
     }
-    
+
     @Override
     public long getMaxTransactionsPerSecond() {
         return TxConstants.MAX_TX_PER_MS;
     }
 
+    @Override
+    public boolean isPreExistingVersion(long version) {
+        return TxUtils.isPreExistingVersion(version);
+    }
+
+    @Override
+    public BaseRegionObserver getCoProcessor() {
+        return new TransactionProcessor();
+    }
 
     /**
-    * TephraTransactionContext specific functions
-    */
+     * TephraTransactionContext specific functions
+     */
 
     Transaction getTransaction() {
-        return this.tx;
+        return this.getCurrentTransaction();
     }
 
     TransactionContext getContext() {
@@ -397,7 +383,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             txContext.addTransactionAware(txAware);
         } else if (this.tx != null) {
             txAwares.add(txAware);
-            assert(tx != null);
+            assert (tx != null);
             txAware.startTx(tx);
         }
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 4a9cb57..2bab0b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -104,7 +104,7 @@ import org.apache.phoenix.schema.types.PDecimal;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.tephra.TxConstants;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 
 import com.google.common.collect.Lists;
 import com.google.protobuf.ServiceException;
@@ -261,7 +261,7 @@ public class IndexUtil {
                     regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                 }
                 Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr, Collections.<KeyValue>emptyList(), ts, regionStartKey, regionEndkey);
-                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, dataMutation.getAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY));
+                delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, dataMutation.getAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY));
                 indexMutations.add(delete);
             }
             return indexMutations;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 5bfb55d..ca286d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -85,7 +85,7 @@ import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.tephra.util.TxUtils;
+import org.apache.phoenix.transaction.TransactionFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Splitter;
@@ -1433,7 +1433,7 @@ public class PhoenixRuntime {
      * @return wall clock time in milliseconds (i.e. Epoch time) of a given Cell time stamp.
      */
     public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) {
-        return TxUtils.isPreExistingVersion(tsOfCell) ? tsOfCell : TransactionUtil.convertToMilliseconds(tsOfCell);
+        return TransactionFactory.getTransactionFactory().getTransactionContext().isPreExistingVersion(tsOfCell) ? tsOfCell : TransactionUtil.convertToMilliseconds(tsOfCell);
     }
 
     public static long getCurrentScn(ReadOnlyProps props) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5f86341/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 94a56b8..0a55147 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -33,7 +33,6 @@ import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TephraTransactionTable;
 import org.apache.phoenix.transaction.TransactionFactory;
-import org.apache.tephra.TxConstants;
 
 public class TransactionUtil {
     private TransactionUtil() {


[26/46] phoenix git commit: Merge with omid branch

Posted by td...@apache.org.
Merge with omid branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/34e2d36f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/34e2d36f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/34e2d36f

Branch: refs/heads/omid
Commit: 34e2d36fdc6817c736f186528182b612b8ffc9a0
Parents: 5b838cf f5f8634
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue Apr 18 14:07:25 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue Apr 18 14:07:25 2017 +0300

----------------------------------------------------------------------
 .../org/apache/phoenix/tx/FlappingTransactionIT.java  |  3 ---
 .../it/java/org/apache/phoenix/tx/TransactionIT.java  | 14 +++++++-------
 .../coprocessor/BaseScannerRegionObserver.java        |  1 -
 .../coprocessor/PhoenixTransactionalProcessor.java    |  4 ++--
 .../phoenix/coprocessor/ScanRegionObserver.java       |  1 -
 .../coprocessor/UngroupedAggregateRegionObserver.java |  6 +++---
 .../org/apache/phoenix/jdbc/PhoenixConnection.java    |  1 -
 .../org/apache/phoenix/schema/MetaDataClient.java     |  4 ++--
 .../main/java/org/apache/phoenix/util/IndexUtil.java  |  4 ++--
 .../java/org/apache/phoenix/util/PhoenixRuntime.java  |  4 ++--
 .../java/org/apache/phoenix/util/TransactionUtil.java |  1 -
 11 files changed, 18 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index f37d09b,ff2bf6b..f1cf7df
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@@ -291,4 -600,118 +291,4 @@@ public class TransactionIT  extends Par
              conn.close();
          }
      }
- }
 -    
 -    @Test
 -    public void testParallelUpsertSelect() throws Exception {
 -        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 -        props.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, Integer.toString(512));
 -        props.setProperty(QueryServices.SCAN_CACHE_SIZE_ATTRIB, Integer.toString(3));
 -        props.setProperty(QueryServices.SCAN_RESULT_CHUNK_SIZE, Integer.toString(3));
 -        Connection conn = DriverManager.getConnection(getUrl(), props);
 -        conn.setAutoCommit(false);
 -        String fullTableName1 = generateUniqueName();
 -        String fullTableName2 = generateUniqueName();
 -        String sequenceName = "S_" + generateUniqueName();
 -        conn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
 -        conn.createStatement().execute("CREATE TABLE " + fullTableName1 + " (pk INTEGER PRIMARY KEY, val INTEGER) SALT_BUCKETS=4,TRANSACTIONAL=true");
 -        conn.createStatement().execute("CREATE TABLE " + fullTableName2 + " (pk INTEGER PRIMARY KEY, val INTEGER) TRANSACTIONAL=true");
 -
 -        for (int i = 0; i < 100; i++) {
 -            conn.createStatement().execute("UPSERT INTO " + fullTableName1 + " VALUES (NEXT VALUE FOR " + sequenceName + ", " + (i%10) + ")");
 -        }
 -        conn.commit();
 -        conn.setAutoCommit(true);
 -        int upsertCount = conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName2 + " SELECT pk, val FROM " + fullTableName1);
 -        assertEquals(100,upsertCount);
 -        conn.close();
 -    }
 -
 -    @Test
 -    public void testTransactionalTableMetadata() throws SQLException {
 -
 -        try (Connection conn = DriverManager.getConnection(getUrl())) {
 -            String transactTableName = generateUniqueName();
 -            Statement stmt = conn.createStatement();
 -            stmt.execute("CREATE TABLE " + transactTableName + " (k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) " +
 -                "TRANSACTIONAL=true");
 -            conn.commit();
 -
 -            DatabaseMetaData dbmd = conn.getMetaData();
 -            ResultSet rs = dbmd.getTables(null, null, StringUtil.escapeLike(transactTableName), null);
 -            assertTrue(rs.next());
 -            assertEquals("Transactional table was not marked as transactional in JDBC API.",
 -                "true", rs.getString(PhoenixDatabaseMetaData.TRANSACTIONAL));
 -
 -            String nonTransactTableName = generateUniqueName();
 -            Statement stmt2 = conn.createStatement();
 -            stmt2.execute("CREATE TABLE " + nonTransactTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) ");
 -            conn.commit();
 -
 -            ResultSet rs2 = dbmd.getTables(null, null, StringUtil.escapeLike(nonTransactTableName), null);
 -            assertTrue(rs2.next());
 -            assertEquals("Non-transactional table was marked as transactional in JDBC API.",
 -                "false", rs2.getString(PhoenixDatabaseMetaData.TRANSACTIONAL));
 -        }
 -    }
 -
 -    @Test
 -    public void testInflightPartialEval() throws SQLException {
 -
 -        try (Connection conn = DriverManager.getConnection(getUrl())) {
 -            String transactTableName = generateUniqueName();
 -            Statement stmt = conn.createStatement();
 -            stmt.execute("CREATE TABLE " + transactTableName + " (k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) " +
 -                "TRANSACTIONAL=true");
 -            
 -            try (Connection conn1 = DriverManager.getConnection(getUrl()); Connection conn2 = DriverManager.getConnection(getUrl())) {
 -                conn1.createStatement().execute("UPSERT INTO " + transactTableName + " VALUES ('a','b','x')");
 -                // Select to force uncommitted data to be written
 -                ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + transactTableName);
 -                assertTrue(rs.next());
 -                assertEquals("a", rs.getString(1));
 -                assertEquals("b", rs.getString(2));
 -                assertFalse(rs.next());
 -                
 -                conn2.createStatement().execute("UPSERT INTO " + transactTableName + " VALUES ('a','c','x')");
 -                // Select to force uncommitted data to be written
 -                rs = conn2.createStatement().executeQuery("SELECT * FROM " + transactTableName );
 -                assertTrue(rs.next());
 -                assertEquals("a", rs.getString(1));
 -                assertEquals("c", rs.getString(2));
 -                assertFalse(rs.next());
 -                
 -                // If the AndExpression were to see the uncommitted row from conn2, the filter would
 -                // filter the row out early and no longer continue to evaluate other cells due to
 -                // the way partial evaluation holds state.
 -                rs = conn1.createStatement().executeQuery("SELECT * FROM " +  transactTableName + " WHERE v1 != 'c' AND v2 = 'x'");
 -                assertTrue(rs.next());
 -                assertEquals("a", rs.getString(1));
 -                assertEquals("b", rs.getString(2));
 -                assertFalse(rs.next());
 -                
 -                // Same as above for conn1 data
 -                rs = conn2.createStatement().executeQuery("SELECT * FROM " + transactTableName + " WHERE v1 != 'b' AND v2 = 'x'");
 -                assertTrue(rs.next());
 -                assertEquals("a", rs.getString(1));
 -                assertEquals("c", rs.getString(2));
 -                assertFalse(rs.next());
 -            }
 -
 -        }
 -    }
 -    
 -    
 -    @Test
 -    public void testOnDupKeyForTransactionalTable() throws Exception {
 -        // TODO: we should support having a transactional table defined for a connectionless connection
 -        try (Connection conn = DriverManager.getConnection(getUrl())) {
 -            String transactTableName = generateUniqueName();
 -            conn.createStatement().execute("CREATE TABLE " + transactTableName + " (k integer not null primary key, v bigint) TRANSACTIONAL=true");
 -            conn.createStatement().execute("UPSERT INTO " + transactTableName + " VALUES(0,0) ON DUPLICATE KEY UPDATE v = v + 1");
 -            fail();
 -        } catch (SQLException e) {
 -            assertEquals(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL.getErrorCode(), e.getErrorCode());
 -        }
 -    }
 -    
+ }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2dec235,9e4f39d..ea18401
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@@ -110,8 -108,8 +110,9 @@@ import org.apache.phoenix.schema.types.
  import org.apache.phoenix.schema.types.PDouble;
  import org.apache.phoenix.schema.types.PFloat;
  import org.apache.phoenix.schema.types.PLong;
+ import org.apache.phoenix.transaction.PhoenixTransactionContext;
  import org.apache.phoenix.util.ByteUtil;
 +import org.apache.phoenix.util.EncodedColumnsUtil;
  import org.apache.phoenix.util.IndexUtil;
  import org.apache.phoenix.util.KeyValueUtil;
  import org.apache.phoenix.util.LogUtil;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d09284f,42d1431..3bb6ae0
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@@ -212,8 -199,8 +212,9 @@@ import org.apache.phoenix.schema.types.
  import org.apache.phoenix.schema.types.PUnsignedLong;
  import org.apache.phoenix.schema.types.PVarbinary;
  import org.apache.phoenix.schema.types.PVarchar;
+ import org.apache.phoenix.transaction.PhoenixTransactionContext;
  import org.apache.phoenix.util.ByteUtil;
 +import org.apache.phoenix.util.EncodedColumnsUtil;
  import org.apache.phoenix.util.IndexUtil;
  import org.apache.phoenix.util.LogUtil;
  import org.apache.phoenix.util.MetaDataUtil;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/34e2d36f/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------


[40/46] phoenix git commit: PHOENIX-3806 Reduce IndexUpdateManager sorting overhead during Index rebuild

Posted by td...@apache.org.
PHOENIX-3806 Reduce IndexUpdateManager sorting overhead during Index rebuild

Signed-off-by: Lars Hofhansl <la...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a1d3c169
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a1d3c169
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a1d3c169

Branch: refs/heads/omid
Commit: a1d3c1697eb478a04c7f94e37cdfc2334f94b4f5
Parents: f51c0db
Author: Vincent Poon <vi...@gmail.com>
Authored: Mon May 8 16:58:01 2017 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Mon May 8 16:58:01 2017 -0700

----------------------------------------------------------------------
 .../covered/update/IndexUpdateManager.java      |  12 +-
 .../index/covered/update/SortedCollection.java  | 128 -------------------
 2 files changed, 8 insertions(+), 132 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1d3c169/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java
index a183186..5f6020a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Delete;
@@ -109,7 +110,7 @@ public class IndexUpdateManager {
     ImmutableBytesPtr key = new ImmutableBytesPtr(tableName);
     Collection<Mutation> updates = map.get(key);
     if (updates == null) {
-      updates = new SortedCollection<Mutation>(COMPARATOR);
+      updates = new TreeSet<Mutation>(COMPARATOR);
       map.put(key, updates);
     }
     fixUpCurrentUpdates(updates, m);
@@ -167,9 +168,12 @@ public class IndexUpdateManager {
         break;
       }
     }
-    
-    updates.remove(toRemove);
-    updates.add(pendingMutation);
+    if (toRemove != null) {
+        updates.remove(toRemove);
+    }
+    if (pendingMutation != null) {
+        updates.add(pendingMutation);
+    }
   }
 
   private void markMutationForRemoval(Mutation m) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1d3c169/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/SortedCollection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/SortedCollection.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/SortedCollection.java
deleted file mode 100644
index ee8b453..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/SortedCollection.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.update;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
-import com.google.common.collect.Iterators;
-
-/**
- * A collection whose elements are stored and returned sorted.
- * <p>
- * We can't just use something like a {@link PriorityQueue} because it doesn't return the
- * underlying values in sorted order.
- * @param <T>
- */
-class SortedCollection<T> implements Collection<T>, Iterable<T> {
-
-  private PriorityQueue<T> queue;
-  private Comparator<T> comparator;
-
-  /**
-   * Use the given comparator to compare all keys for sorting
-   * @param comparator
-   */
-  public SortedCollection(Comparator<T> comparator) {
-    this.queue = new PriorityQueue<T>(1, comparator);
-    this.comparator = comparator;
-  }
-  
-  /**
-   * All passed elements are expected to be {@link Comparable}
-   */
-  public SortedCollection() {
-    this.queue = new PriorityQueue<T>();
-  }
-  
-  @Override
-  public int size() {
-    return this.queue.size();
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return this.queue.isEmpty();
-  }
-
-  @Override
-  public boolean contains(Object o) {
-    return this.queue.contains(o);
-  }
-
-  @Override
-  public Iterator<T> iterator() {
-    @SuppressWarnings("unchecked")
-    T[] array = (T[]) this.queue.toArray();
-    if (this.comparator == null) {
-      Arrays.sort(array);
-    } else {
-      Arrays.sort(
-     array, this.comparator);}
-    return Iterators.forArray(array);
-  }
-
-  @Override
-  public Object[] toArray() {
-    return this.queue.toArray();
-  }
-
-  @SuppressWarnings("hiding")
-  @Override
-  public <T> T[] toArray(T[] a) {
-    return this.queue.toArray(a);
-  }
-
-  @Override
-  public boolean add(T e) {
-    return this.queue.add(e);
-  }
-
-  @Override
-  public boolean remove(Object o) {
-    return this.queue.remove(o);
-  }
-
-  @Override
-  public boolean containsAll(Collection<?> c) {
-    return this.queue.containsAll(c);
-  }
-
-  @Override
-  public boolean addAll(Collection<? extends T> c) {
-    return this.queue.addAll(c);
-  }
-
-  @Override
-  public boolean removeAll(Collection<?> c) {
-    return queue.removeAll(c);
-  }
-
-  @Override
-  public boolean retainAll(Collection<?> c) {
-    return this.queue.retainAll(c);
-  }
-
-  @Override
-  public void clear() {
-    this.queue.clear();
-  }
-}
\ No newline at end of file


[25/46] phoenix git commit: Merge with omid branch

Posted by td...@apache.org.
Merge with omid branch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5b838cfc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5b838cfc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5b838cfc

Branch: refs/heads/omid
Commit: 5b838cfc00d82a6bed05fcb96bcf5d04cbf3d969
Parents: f584e5f 6f7d42f
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue Apr 18 13:59:27 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue Apr 18 13:59:27 2017 +0300

----------------------------------------------------------------------
 LICENSE                                         |   10 -
 phoenix-assembly/pom.xml                        |    2 +-
 phoenix-client/pom.xml                          |    2 +-
 phoenix-core/pom.xml                            |    2 +-
 .../phoenix/end2end/AggregateQueryIT.java       |   74 +-
 .../AlterMultiTenantTableWithViewsIT.java       |   25 +-
 .../apache/phoenix/end2end/AlterTableIT.java    |  491 +-
 .../phoenix/end2end/AlterTableWithViewsIT.java  |  133 +-
 .../org/apache/phoenix/end2end/ArrayIT.java     |   28 +
 .../org/apache/phoenix/end2end/BaseJoinIT.java  |    4 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |   98 +-
 .../apache/phoenix/end2end/CaseStatementIT.java |   54 +-
 .../apache/phoenix/end2end/CastAndCoerceIT.java |   34 +-
 .../end2end/ClientTimeArithmeticQueryIT.java    |   76 +-
 .../end2end/ColumnEncodedBytesPropIT.java       |  112 +
 .../end2end/CountDistinctCompressionIT.java     |    2 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |  136 +
 .../phoenix/end2end/CsvBulkLoadToolIT.java      |   36 +
 .../org/apache/phoenix/end2end/DateTimeIT.java  |    2 +-
 .../phoenix/end2end/DefaultColumnValueIT.java   |    1 +
 .../apache/phoenix/end2end/DerivedTableIT.java  |    2 +-
 .../apache/phoenix/end2end/DistinctCountIT.java |    4 +-
 .../apache/phoenix/end2end/DropSchemaIT.java    |   11 +
 .../apache/phoenix/end2end/DynamicColumnIT.java |   63 +
 .../phoenix/end2end/ExtendedQueryExecIT.java    |    8 +-
 .../apache/phoenix/end2end/FunkyNamesIT.java    |    2 +-
 .../apache/phoenix/end2end/GroupByCaseIT.java   |   66 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   |  162 +-
 .../phoenix/end2end/ImmutableTablePropIT.java   |  130 -
 .../end2end/ImmutableTablePropertiesIT.java     |  189 +
 .../apache/phoenix/end2end/MutableQueryIT.java  |  424 ++
 .../phoenix/end2end/NativeHBaseTypesIT.java     |    2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   28 +-
 .../org/apache/phoenix/end2end/OrderByIT.java   |    2 -
 .../apache/phoenix/end2end/PercentileIT.java    |    4 +-
 .../phoenix/end2end/PhoenixRuntimeIT.java       |    4 +-
 .../phoenix/end2end/PointInTimeQueryIT.java     |   78 +-
 .../phoenix/end2end/ProductMetricsIT.java       |    2 +-
 .../end2end/QueryDatabaseMetaDataIT.java        |   16 +-
 .../org/apache/phoenix/end2end/QueryIT.java     |  112 +-
 .../phoenix/end2end/ReadIsolationLevelIT.java   |    2 +-
 .../phoenix/end2end/RegexBulkLoadToolIT.java    |  371 ++
 .../apache/phoenix/end2end/RenewLeaseIT.java    |    7 +-
 .../phoenix/end2end/RowValueConstructorIT.java  |   36 +-
 .../org/apache/phoenix/end2end/ScanQueryIT.java |   93 +-
 .../phoenix/end2end/StatsCollectorIT.java       |  124 +-
 .../apache/phoenix/end2end/StoreNullsIT.java    |  310 +-
 .../phoenix/end2end/StoreNullsPropIT.java       |   51 +
 ...SysTableNamespaceMappedStatsCollectorIT.java |    4 +-
 .../java/org/apache/phoenix/end2end/TopNIT.java |    6 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |   10 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |   51 +-
 .../phoenix/end2end/UserDefinedFunctionsIT.java |    3 +-
 .../phoenix/end2end/VariableLengthPKIT.java     |   38 +-
 .../phoenix/end2end/index/DropColumnIT.java     |  517 ++
 .../phoenix/end2end/index/DropMetadataIT.java   |  215 -
 .../phoenix/end2end/index/ImmutableIndexIT.java |   20 +-
 .../end2end/index/IndexExpressionIT.java        |   28 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |   58 +-
 .../phoenix/end2end/index/IndexTestUtil.java    |   11 +-
 .../end2end/index/MutableIndexFailureIT.java    |    2 +
 .../phoenix/end2end/index/MutableIndexIT.java   |   96 +-
 .../phoenix/end2end/salted/SaltedTableIT.java   |    2 +-
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |    2 +-
 .../iterate/RenewLeaseOnlyTableIterator.java    |   17 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java    |   39 +-
 .../SystemCatalogWALEntryFilterIT.java          |  185 +
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |    6 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  |  518 ++
 .../org/apache/phoenix/tx/TransactionIT.java    |  589 +-
 .../org/apache/phoenix/tx/TxCheckpointIT.java   |   14 +-
 .../IndexHalfStoreFileReaderGenerator.java      |   12 +-
 .../regionserver/wal/IndexedWALEditCodec.java   |   20 +-
 .../apache/phoenix/cache/ServerCacheClient.java |    2 +
 .../org/apache/phoenix/cache/TenantCache.java   |    2 +-
 .../apache/phoenix/cache/TenantCacheImpl.java   |    4 +-
 .../phoenix/compile/CreateTableCompiler.java    |   10 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |    2 +-
 .../phoenix/compile/ExpressionCompiler.java     |   18 +-
 .../apache/phoenix/compile/FromCompiler.java    |   54 +-
 .../apache/phoenix/compile/JoinCompiler.java    |   15 +-
 .../phoenix/compile/ListJarsQueryPlan.java      |    6 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |   11 +-
 .../compile/PostLocalIndexDDLCompiler.java      |    9 +-
 .../phoenix/compile/ProjectionCompiler.java     |   35 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |    4 +-
 .../compile/TupleProjectionCompiler.java        |   31 +-
 .../apache/phoenix/compile/UnionCompiler.java   |    7 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   32 +-
 .../apache/phoenix/compile/WhereCompiler.java   |   24 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   54 +-
 .../GroupedAggregateRegionObserver.java         |   42 +-
 .../coprocessor/HashJoinRegionScanner.java      |   25 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  273 +-
 .../phoenix/coprocessor/MetaDataProtocol.java   |    6 +-
 .../coprocessor/MetaDataRegionObserver.java     |    4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |   33 +-
 .../coprocessor/ServerCachingEndpointImpl.java  |    2 +-
 .../coprocessor/ServerCachingProtocol.java      |    2 +-
 .../UngroupedAggregateRegionObserver.java       |  147 +-
 .../coprocessor/generated/PTableProtos.java     | 1474 ++++-
 .../generated/ServerCachingProtos.java          | 5125 +++++++++++++++++-
 .../phoenix/exception/SQLExceptionCode.java     |    9 +-
 .../apache/phoenix/execute/BaseQueryPlan.java   |   27 +-
 .../apache/phoenix/execute/CorrelatePlan.java   |    2 +-
 .../apache/phoenix/execute/MutationState.java   |   19 +-
 .../phoenix/execute/SortMergeJoinPlan.java      |    2 +-
 .../apache/phoenix/execute/TupleProjector.java  |   72 +-
 .../apache/phoenix/execute/UnnestArrayPlan.java |    3 +-
 .../expression/ArrayConstructorExpression.java  |   82 +-
 .../phoenix/expression/CoerceExpression.java    |    2 +-
 .../phoenix/expression/ExpressionType.java      |    5 +-
 .../expression/KeyValueColumnExpression.java    |   32 +-
 .../phoenix/expression/LiteralExpression.java   |   11 +-
 .../expression/ProjectedColumnExpression.java   |    1 +
 .../expression/SingleCellColumnExpression.java  |  182 +
 .../SingleCellConstructorExpression.java        |  102 +
 .../function/ArrayElemRefExpression.java        |    4 +-
 .../expression/function/ArrayIndexFunction.java |    4 +-
 .../expression/util/regex/JONIPattern.java      |   18 +-
 .../visitor/BaseExpressionVisitor.java          |    6 +
 .../visitor/CloneExpressionVisitor.java         |   12 +
 .../CloneNonDeterministicExpressionVisitor.java |    1 +
 .../expression/visitor/ExpressionVisitor.java   |    6 +
 .../StatelessTraverseAllExpressionVisitor.java  |   13 +-
 .../StatelessTraverseNoExpressionVisitor.java   |   13 +-
 .../phoenix/filter/ColumnProjectionFilter.java  |   24 +-
 ...EncodedQualifiersColumnProjectionFilter.java |  151 +
 .../MultiEncodedCQKeyValueComparisonFilter.java |  369 ++
 .../filter/MultiKeyValueComparisonFilter.java   |    6 +-
 .../SingleCQKeyValueComparisonFilter.java       |    3 +-
 .../filter/SingleKeyValueComparisonFilter.java  |    4 +-
 .../apache/phoenix/filter/SkipScanFilter.java   |   15 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |    4 +
 .../apache/phoenix/hbase/index/ValueGetter.java |    1 +
 .../hbase/index/covered/LocalTableState.java    |   14 +-
 .../phoenix/hbase/index/covered/TableState.java |    4 +-
 .../example/CoveredColumnIndexCodec.java        |    4 +-
 .../hbase/index/scanner/ScannerBuilder.java     |    8 +-
 .../hbase/index/util/IndexManagementUtil.java   |    2 +-
 .../hbase/index/util/KeyValueBuilder.java       |    1 +
 .../apache/phoenix/index/IndexMaintainer.java   |  538 +-
 .../phoenix/index/IndexMetaDataCacheClient.java |    1 +
 .../index/IndexMetaDataCacheFactory.java        |    4 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |    2 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |   26 +-
 .../index/PhoenixIndexFailurePolicy.java        |    2 +-
 .../phoenix/index/PhoenixIndexMetaData.java     |    9 +-
 .../index/PhoenixTransactionalIndexer.java      |   18 +-
 .../phoenix/iterate/BaseResultIterators.java    |  111 +-
 .../iterate/LookAheadResultIterator.java        |    2 +-
 .../phoenix/iterate/MappedByteBufferQueue.java  |    1 +
 .../phoenix/iterate/OrderedResultIterator.java  |    3 +-
 .../iterate/RegionScannerResultIterator.java    |   19 +-
 .../phoenix/iterate/TableResultIterator.java    |  186 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |    2 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   14 +-
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |    3 -
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java     |   15 +-
 .../apache/phoenix/jdbc/PhoenixResultSet.java   |    2 +-
 .../apache/phoenix/join/HashCacheFactory.java   |    2 +-
 .../phoenix/mapreduce/AbstractBulkLoadTool.java |    2 +-
 .../mapreduce/FormatToBytesWritableMapper.java  |   54 +-
 .../mapreduce/FormatToKeyValueReducer.java      |   58 +-
 .../mapreduce/MultiHfileOutputFormat.java       |    2 +-
 .../phoenix/mapreduce/PhoenixInputFormat.java   |   69 +-
 .../phoenix/mapreduce/PhoenixInputSplit.java    |   23 +-
 .../phoenix/mapreduce/RegexBulkLoadTool.java    |   74 +
 .../mapreduce/RegexToKeyValueMapper.java        |  135 +
 .../phoenix/mapreduce/index/IndexTool.java      |   12 +-
 .../index/PhoenixIndexPartialBuildMapper.java   |    4 +-
 .../util/PhoenixConfigurationUtil.java          |   11 +
 .../phoenix/monitoring/GlobalClientMetrics.java |    8 +-
 .../apache/phoenix/monitoring/MetricType.java   |    5 +-
 .../query/ConnectionQueryServicesImpl.java      |  166 +-
 .../query/ConnectionlessQueryServicesImpl.java  |    1 -
 .../java/org/apache/phoenix/query/KeyRange.java |   72 +-
 .../apache/phoenix/query/QueryConstants.java    |   59 +-
 .../org/apache/phoenix/query/QueryServices.java |   12 +-
 .../phoenix/query/QueryServicesOptions.java     |   10 +-
 .../SystemCatalogWALEntryFilter.java            |   69 +
 .../org/apache/phoenix/schema/ColumnRef.java    |   16 +-
 .../phoenix/schema/ColumnValueDecoder.java      |   31 +
 .../phoenix/schema/ColumnValueEncoder.java      |   45 +
 .../apache/phoenix/schema/DelegateColumn.java   |    4 +
 .../apache/phoenix/schema/DelegateTable.java    |   24 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  593 +-
 .../java/org/apache/phoenix/schema/PColumn.java |    4 +-
 .../apache/phoenix/schema/PColumnFamily.java    |   14 +-
 .../phoenix/schema/PColumnFamilyImpl.java       |   49 +-
 .../org/apache/phoenix/schema/PColumnImpl.java  |   29 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    |    4 +-
 .../java/org/apache/phoenix/schema/PTable.java  |  435 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  480 +-
 .../org/apache/phoenix/schema/PTableKey.java    |    6 +-
 .../apache/phoenix/schema/ProjectedColumn.java  |   12 +-
 .../org/apache/phoenix/schema/RowKeySchema.java |   10 +-
 .../org/apache/phoenix/schema/SaltingUtil.java  |    2 +-
 .../apache/phoenix/schema/TableProperty.java    |   42 +
 .../apache/phoenix/schema/tuple/BaseTuple.java  |   39 +
 .../phoenix/schema/tuple/DelegateTuple.java     |    7 +
 .../tuple/EncodedColumnQualiferCellsList.java   |  581 ++
 .../schema/tuple/MultiKeyValueTuple.java        |    1 +
 .../tuple/PositionBasedMultiKeyValueTuple.java  |   90 +
 .../schema/tuple/PositionBasedResultTuple.java  |  125 +
 .../phoenix/schema/tuple/ResultTuple.java       |   20 +-
 .../org/apache/phoenix/schema/tuple/Tuple.java  |    4 +
 .../phoenix/schema/types/PArrayDataType.java    |  340 +-
 .../schema/types/PArrayDataTypeDecoder.java     |  102 +
 .../schema/types/PArrayDataTypeEncoder.java     |  170 +
 .../transaction/OmidTransactionContext.java     |   13 +
 .../transaction/PhoenixTransactionContext.java  |   20 +-
 .../transaction/TephraTransactionContext.java   |  224 +-
 .../apache/phoenix/util/EncodedColumnsUtil.java |  205 +
 .../java/org/apache/phoenix/util/IndexUtil.java |   76 +-
 .../org/apache/phoenix/util/KeyValueUtil.java   |    2 -
 .../org/apache/phoenix/util/MetaDataUtil.java   |    5 +
 .../org/apache/phoenix/util/PhoenixRuntime.java |   12 +-
 .../org/apache/phoenix/util/ResultUtil.java     |   60 -
 .../java/org/apache/phoenix/util/ScanUtil.java  |   29 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   29 +-
 .../phoenix/util/regex/RegexUpsertExecutor.java |   80 +
 .../wal/IndexedWALEditCodecTest.java            |   32 +
 .../apache/phoenix/cache/TenantCacheTest.java   |    6 +-
 .../phoenix/compile/HavingCompilerTest.java     |    2 +-
 .../phoenix/compile/QueryCompilerTest.java      |   70 +-
 .../phoenix/compile/QueryOptimizerTest.java     |   51 +
 .../compile/SelectStatementRewriterTest.java    |   11 +-
 .../phoenix/compile/WhereCompilerTest.java      |   44 +-
 .../phoenix/execute/CorrelatePlanTest.java      |   12 +-
 .../execute/LiteralResultIteratorPlanTest.java  |   12 +-
 .../phoenix/execute/MutationStateTest.java      |    4 +-
 .../phoenix/execute/UnnestArrayPlanTest.java    |    8 +-
 .../ArrayConstructorExpressionTest.java         |   20 +-
 .../expression/ColumnExpressionTest.java        |   27 +-
 .../phoenix/filter/SkipScanFilterTest.java      |  229 +-
 .../index/covered/TestLocalTableState.java      |   10 +-
 .../phoenix/index/IndexMaintainerTest.java      |    7 +-
 .../iterate/AggregateResultScannerTest.java     |    2 +-
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java |   10 +
 .../phoenix/jdbc/SecureUserConnectionsTest.java |   66 +-
 .../query/BaseConnectionlessQueryTest.java      |   18 +-
 .../java/org/apache/phoenix/query/BaseTest.java |   61 +-
 .../phoenix/query/ConnectionlessTest.java       |   18 +-
 .../EncodedColumnQualifierCellsListTest.java    |  608 +++
 .../apache/phoenix/query/KeyRangeMoreTest.java  |  263 +
 .../phoenix/query/ScannerLeaseRenewalTest.java  |   21 +-
 .../schema/ImmutableStorageSchemeTest.java      |  182 +
 .../schema/types/PDataTypeForArraysTest.java    |   38 +-
 .../apache/phoenix/util/PhoenixRuntimeTest.java |    7 +-
 .../util/QualifierEncodingSchemeTest.java       |  119 +
 .../java/org/apache/phoenix/util/TestUtil.java  |   89 +-
 phoenix-flume/pom.xml                           |   12 +-
 .../phoenix/flume/CsvEventSerializerIT.java     |  416 ++
 .../org/apache/phoenix/flume/PhoenixSinkIT.java |    2 +-
 .../apache/phoenix/flume/FlumeConstants.java    |   14 +-
 .../flume/serializer/CsvEventSerializer.java    |  196 +
 .../flume/serializer/EventSerializers.java      |    4 +-
 phoenix-hive/pom.xml                            |   15 +-
 .../phoenix/hive/BaseHivePhoenixStoreIT.java    |  165 +
 .../apache/phoenix/hive/HiveMapReduceIT.java    |   34 +
 .../apache/phoenix/hive/HivePhoenixStoreIT.java |  330 +-
 .../org/apache/phoenix/hive/HiveTestUtil.java   |   22 +-
 .../java/org/apache/phoenix/hive/HiveTezIT.java |   34 +
 .../apache/phoenix/hive/PhoenixMetaHook.java    |   37 +-
 .../org/apache/phoenix/hive/PhoenixSerDe.java   |    9 +-
 .../apache/phoenix/hive/PhoenixSerializer.java  |    4 +
 .../phoenix/hive/PhoenixStorageHandler.java     |    5 +
 .../hive/mapreduce/PhoenixInputFormat.java      |   56 +-
 .../hive/mapreduce/PhoenixRecordReader.java     |    1 +
 .../hive/mapreduce/PhoenixResultWritable.java   |   12 +-
 .../phoenix/hive/query/PhoenixQueryBuilder.java |  370 +-
 .../phoenix/hive/util/ColumnMappingUtils.java   |   76 +
 .../hive/util/PhoenixConnectionUtil.java        |   19 +
 .../hive/util/PhoenixStorageHandlerUtil.java    |   46 +-
 .../hive/query/PhoenixQueryBuilderTest.java     |   97 +-
 phoenix-kafka/pom.xml                           |    2 +-
 phoenix-kafka/src/it/resources/consumer.props   |   22 +-
 phoenix-kafka/src/it/resources/producer.props   |   22 +-
 phoenix-pherf/pom.xml                           |    2 +-
 phoenix-pig/pom.xml                             |    2 +-
 phoenix-protocol/src/main/PTable.proto          |    9 +
 .../src/main/ServerCachingService.proto         |   35 +
 phoenix-queryserver-client/pom.xml              |    2 +-
 .../queryserver/client/SqllineWrapper.java      |   18 +-
 phoenix-queryserver/pom.xml                     |    2 +-
 .../phoenix/queryserver/server/QueryServer.java |    5 +-
 phoenix-server/pom.xml                          |    2 +-
 phoenix-spark/pom.xml                           |    2 +-
 .../org/apache/phoenix/spark/PhoenixRDD.scala   |    4 +
 phoenix-tracing-webapp/pom.xml                  |    2 +-
 pom.xml                                         |    6 +-
 292 files changed, 20556 insertions(+), 4223 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
index 246ecd4,cb3b4b3..aac9586
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
@@@ -36,9 -36,10 +36,9 @@@ import org.apache.phoenix.end2end.Paral
  import org.apache.phoenix.execute.MutationState;
  import org.apache.phoenix.jdbc.PhoenixConnection;
  import org.apache.phoenix.query.QueryServices;
 -import org.apache.phoenix.schema.PTableImpl;
++import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
  import org.apache.phoenix.util.PropertiesUtil;
  import org.apache.phoenix.util.SchemaUtil;
--import org.apache.tephra.Transaction.VisibilityLevel;
  import org.junit.Test;
  import org.junit.runner.RunWith;
  import org.junit.runners.Parameterized;
@@@ -254,7 -266,7 +254,7 @@@ public class TxCheckpointIT extends Par
  		long wp = state.getWritePointer();
  		conn.createStatement().execute(
  				"upsert into " + fullTableName + " select max(id)+1, 'a4', 'b4' from " + fullTableName + "");
--		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
++		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
  				state.getVisibilityLevel());
  		assertEquals(wp, state.getWritePointer()); // Make sure write ptr
  													// didn't move
@@@ -266,7 -278,7 +266,7 @@@
  
  		conn.createStatement().execute(
  				"upsert into " + fullTableName + " select max(id)+1, 'a5', 'b5' from " + fullTableName + "");
--		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
++		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
  				state.getVisibilityLevel());
  		assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr
  														// moves
@@@ -279,7 -291,7 +279,7 @@@
  		
  		conn.createStatement().execute(
  				"upsert into " + fullTableName + " select max(id)+1, 'a6', 'b6' from " + fullTableName + "");
--		assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
++		assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT,
  				state.getVisibilityLevel());
  		assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr
  														// moves
@@@ -318,7 -328,7 +318,7 @@@
  	        state.startTransaction();
  	        long wp = state.getWritePointer();
  	        conn.createStatement().execute("delete from " + fullTableName + "1 where id1=fk1b AND fk1b=id1");
--	        assertEquals(VisibilityLevel.SNAPSHOT, state.getVisibilityLevel());
++	        assertEquals(PhoenixVisibilityLevel.SNAPSHOT, state.getVisibilityLevel());
  	        assertEquals(wp, state.getWritePointer()); // Make sure write ptr didn't move
  	
  	        rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");
@@@ -336,7 -346,7 +336,7 @@@
  	        assertFalse(rs.next());
  	
  	        conn.createStatement().execute("delete from " + fullTableName + "1 where id1 in (select fk1a from " + fullTableName + "1 join " + fullTableName + "2 on (fk2=id1))");
--	        assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
++	        assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
  	        assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr moved
  	
  	        rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");
@@@ -353,7 -363,7 +353,7 @@@
              stmt.executeUpdate("upsert into " + fullTableName + "2 values (2, 4)");
  
              conn.createStatement().execute("delete from " + fullTableName + "1 where id1 in (select fk1a from " + fullTableName + "1 join " + fullTableName + "2 on (fk2=id1))");
--            assertEquals(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
++            assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
              assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr moved
      
              rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 321d117,e042150..4addec2
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@@ -313,9 -335,9 +335,9 @@@ abstract public class BaseScannerRegion
              final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
              final Region dataRegion, final IndexMaintainer indexMaintainer,
              final byte[][] viewConstants, final TupleProjector projector,
-             final ImmutableBytesWritable ptr) {
+             final ImmutableBytesWritable ptr, final boolean useQualiferAsListIndex) {
          return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
-                 dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr);
 -                dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr, useQualiferAsListIndex);
++                dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr, useQualiferAsListIndex);
      }
  
      /**
@@@ -330,16 -352,18 +352,16 @@@
       * @param tupleProjector
       * @param dataRegion
       * @param indexMaintainer
 -     * @param tx current transaction
       * @param viewConstants
       */
-     protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
+     RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
              final RegionScanner s, final Set<KeyValueColumnExpression> arrayKVRefs,
              final Expression[] arrayFuncRefs, final int offset, final Scan scan,
              final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
              final Region dataRegion, final IndexMaintainer indexMaintainer,
 -            Transaction tx, 
              final byte[][] viewConstants, final KeyValueSchema kvSchema,
              final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
-             final ImmutableBytesWritable ptr) {
+             final ImmutableBytesWritable ptr, final boolean useQualifierAsListIndex) {
          return new RegionScanner() {
  
              private boolean hasReferences = checkForReferenceFiles();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 0e0e3ba,02b05f9..c9156b4
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@@ -208,18 -218,26 +217,24 @@@ public class ScanRegionObserver extend
          if (dataColumns != null) {
              tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
              dataRegion = c.getEnvironment().getRegion();
-             byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
-             List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
+             boolean useProto = false;
+             byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
+             useProto = localIndexBytes != null;
+             if (localIndexBytes == null) {
+                 localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
+             }
+             List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
              indexMaintainer = indexMaintainers.get(0);
              viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
 -            byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
 -            tx = MutationState.decodeTransaction(txState);
          }
  
          final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
          final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+         boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
          innerScanner =
                  getWrappedScanner(c, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan,
 -                    dataColumns, tupleProjector, dataRegion, indexMaintainer, tx,
 +                    dataColumns, tupleProjector, dataRegion, indexMaintainer,
-                     viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr);
+                     viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr, useQualifierAsIndex);
  
          final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);
          if (j != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 23c8b2a,d32199b..2b72be1
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@@ -279,9 -322,25 +279,12 @@@ public class MutationState implements S
                      break;
                  }
              }
 +
 +            phoenixTransactionContext.checkpoint(hasUncommittedData);
 +
+             if (hasUncommittedData) {
 -                try {
 -                    if (txContext == null) {
 -                        currentTx = tx = connection.getQueryServices().getTransactionSystemClient().checkpoint(currentTx);
 -                    }  else {
 -                        txContext.checkpoint();
 -                        currentTx = tx = txContext.getCurrentTransaction();
 -                    }
 -                    // Since we've checkpointed, we can clear out uncommitted set, since a statement run afterwards
 -                    // should see all this data.
 -                    uncommittedPhysicalNames.clear();
 -                } catch (TransactionFailureException e) {
 -                    throw new SQLException(e);
 -                } 
++                uncommittedPhysicalNames.clear();
+             }
 -            // Since we're querying our own table while mutating it, we must exclude
 -            // see our current mutations, otherwise we can get erroneous results (for DELETE)
 -            // or get into an infinite loop (for UPSERT SELECT).
 -            currentTx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
              return true;
          }
          return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
index 8658524,9edcafc..18b9edd
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
@@@ -45,14 -47,13 +45,14 @@@ public class IndexMetaDataCacheFactory 
      }
  
      @Override
-     public Closeable newCache (ImmutableBytesWritable cachePtr, byte[] txState, final MemoryChunk chunk) throws SQLException {
+     public Closeable newCache (ImmutableBytesWritable cachePtr, byte[] txState, final MemoryChunk chunk, boolean useProtoForIndexMaintainer) throws SQLException {
          // just use the standard keyvalue builder - this doesn't really need to be fast
 +        
          final List<IndexMaintainer> maintainers = 
-                 IndexMaintainer.deserialize(cachePtr, GenericKeyValueBuilder.INSTANCE);
+                 IndexMaintainer.deserialize(cachePtr, GenericKeyValueBuilder.INSTANCE, useProtoForIndexMaintainer);
 -        final Transaction txn;
 +        final PhoenixTransactionContext txnContext;
          try {
 -            txn = txState.length!=0 ? MutationState.decodeTransaction(txState) : null;
 +            txnContext = txState.length != 0 ? TransactionFactory.getTransactionFactory().getTransactionContext(txState) : null;
          } catch (IOException e) {
              throw new SQLException(e);
          }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index 82fe2f3,39473dc..fa2fed2
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@@ -47,11 -47,16 +47,16 @@@ public class PhoenixIndexMetaData imple
          if (attributes == null) { return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; }
          byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID);
          if (uuid == null) { return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; }
-         byte[] md = attributes.get(PhoenixIndexCodec.INDEX_MD);
+         boolean useProto = false;
+         byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD);
+         useProto = md != null;
+         if (md == null) {
+             md = attributes.get(PhoenixIndexCodec.INDEX_MD);
+         }
          byte[] txState = attributes.get(BaseScannerRegionObserver.TX_STATE);
          if (md != null) {
-             final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md);
+             final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md, useProto);
 -            final Transaction txn = MutationState.decodeTransaction(txState);
 +            final PhoenixTransactionContext txnContext = TransactionFactory.getTransactionFactory().getTransactionContext(txState);
              return new IndexMetaDataCache() {
  
                  @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 8a4e284,937ac14..d122d0c
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@@ -3,8 -3,7 +3,9 @@@ package org.apache.phoenix.transaction
  import java.sql.SQLException;
  import java.util.concurrent.TimeoutException;
  
++import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.phoenix.schema.PTable;
 +import org.slf4j.Logger;
  
  public class OmidTransactionContext implements PhoenixTransactionContext {
  
@@@ -74,33 -74,4 +75,45 @@@
          return 0;
      }
  
 +    @Override
 +    public long getWritePointer() {
 +        // TODO Auto-generated method stub
 +        return 0;
 +    }
 +
 +    @Override
 +    public PhoenixVisibilityLevel getVisibilityLevel() {
 +        // TODO Auto-generated method stub
 +        return null;
 +    }
 +
 +    @Override
 +    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
 +        // TODO Auto-generated method stub
 +        
 +    }
 +
 +    @Override
 +    public byte[] encodeTransaction() throws SQLException {
 +        // TODO Auto-generated method stub
 +        return null;
 +    }
 +
 +    @Override
 +    public long getMaxTransactionsPerSecond() {
 +        // TODO Auto-generated method stub
 +        return 0;
 +    }
++
++    @Override
++    public boolean isPreExistingVersion(long version) {
++        // TODO Auto-generated method stub
++        return false;
++    }
++
++    @Override
++    public BaseRegionObserver getCoProcessor() {
++        // TODO Auto-generated method stub
++        return null;
++    }
  }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index bd63930,87b68f9..0854f4e
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@@ -1,7 -1,6 +1,8 @@@
  package org.apache.phoenix.transaction;
  
++import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.phoenix.schema.PTable;
 +import org.slf4j.Logger;
  
  import java.sql.SQLException;
  import java.util.concurrent.TimeoutException;
@@@ -9,19 -8,6 +10,23 @@@
  public interface PhoenixTransactionContext {
  
      /**
 +     * 
 +     * Visibility levels needed for checkpointing and  
 +     *
 +     */
 +    public enum PhoenixVisibilityLevel {
 +        SNAPSHOT,
 +        SNAPSHOT_EXCLUDE_CURRENT,
 +        SNAPSHOT_ALL
 +      }
 +
-     public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "phoenix.tx.rollback"; 
++    public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; //"phoenix.tx.rollback"; 
++
++    public static final String PROPERTY_TTL = "dataset.table.ttl";
++
++    public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing";
 +
 +    /**
       * Starts a transaction
       *
       * @throws SQLException
@@@ -93,31 -79,5 +98,44 @@@
      /**
       * Returns transaction snapshot id
       */
 -    long getReadPointer();
 +    public long getReadPointer();
 +
 +    /**
 +     * Returns transaction write pointer. After checkpoint the write pointer is different than the initial one  
 +     */
 +    public long getWritePointer();
 +
 +    /**
 +     * Set visibility level
 +     */
 +    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel);
 +
 +    /**
 +     * Returns visibility level
 +     */
 +    public PhoenixVisibilityLevel getVisibilityLevel();
 +
 +    /**
 +     * Encode transaction
 +     */
 +    public byte[] encodeTransaction() throws SQLException;
 +
 +    /**
 +     * 
 +     * @return max transactions per second
 +     */
 +    public long getMaxTransactionsPerSecond();
++
++    /**
++     *
++     * @param version
++     * @return
++     */
++    public boolean isPreExistingVersion(long version);
++
++    /**
++     *
++     * @return the coprocessor
++     */
++    public BaseRegionObserver getCoProcessor();
  }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b838cfc/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index cfa3ac3,8fc5e0f..a5e6e64
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@@ -8,6 -7,6 +8,7 @@@ import java.util.concurrent.TimeUnit
  import java.util.concurrent.TimeoutException;
  
  import org.apache.hadoop.hbase.HConstants;
++import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.phoenix.exception.SQLExceptionCode;
  import org.apache.phoenix.exception.SQLExceptionInfo;
@@@ -21,11 -17,8 +22,13 @@@ import org.apache.tephra.TransactionCod
  import org.apache.tephra.TransactionConflictException;
  import org.apache.tephra.TransactionContext;
  import org.apache.tephra.TransactionFailureException;
 +import org.apache.tephra.TransactionManager;
  import org.apache.tephra.TransactionSystemClient;
  import org.apache.tephra.Transaction.VisibilityLevel;
 +import org.apache.tephra.TxConstants;
++import org.apache.tephra.hbase.coprocessor.TransactionProcessor;
 +import org.apache.tephra.inmemory.InMemoryTxSystemClient;
++import org.apache.tephra.util.TxUtils;
  import org.apache.tephra.visibility.FenceWait;
  import org.apache.tephra.visibility.VisibilityFence;
  
@@@ -43,30 -32,14 +46,34 @@@ public class TephraTransactionContext i
      private TransactionSystemClient txServiceClient;
      private TransactionFailureException e;
  
 -    public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean threadSafe) {
 +    public TephraTransactionContext() {
 +        this.txServiceClient = null;
 +        this.txAwares = Lists.newArrayList();
 +        this.txContext = null;
 +    }
 +
 +    public TephraTransactionContext(byte[] txnBytes) throws IOException {
 +        this();
-         this.tx = (txnBytes != null && txnBytes.length > 0) ? CODEC.decode(txnBytes) : null;
++        this.tx = (txnBytes != null && txnBytes.length > 0) ? CODEC
++                .decode(txnBytes) : null;
 +    }
 +
 +    public TephraTransactionContext(PhoenixConnection connection) {
-         this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
++        this.txServiceClient = connection.getQueryServices()
++                .getTransactionSystemClient();
 +        this.txAwares = Collections.emptyList();
 +        this.txContext = new TransactionContext(txServiceClient);
 +    }
  
-     public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean subTask) {
--        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
++    public TephraTransactionContext(PhoenixTransactionContext ctx,
++            PhoenixConnection connection, boolean subTask) {
++        this.txServiceClient = connection.getQueryServices()
++                .getTransactionSystemClient();
  
--        assert(ctx instanceof TephraTransactionContext);
++        assert (ctx instanceof TephraTransactionContext);
          TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
  
 -        if (threadSafe) {
 +        if (subTask) {
              this.tx = tephraTransactionContext.getTransaction();
              this.txAwares = Lists.newArrayList();
              this.txContext = null;
@@@ -81,51 -58,41 +88,53 @@@
      @Override
      public void begin() throws SQLException {
          if (txContext == null) {
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build()
++                    .buildException();
          }
  
-         System.out.println("BEGIN");
          try {
              txContext.start();
          } catch (TransactionFailureException e) {
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
--            .setMessage(e.getMessage())
--            .setRootCause(e)
--            .build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.TRANSACTION_FAILED)
++                    .setMessage(e.getMessage()).setRootCause(e).build()
++                    .buildException();
          }
      }
  
      @Override
      public void commit() throws SQLException {
-         
++
 +        if (txContext == null || !isTransactionRunning()) {
 +            return;
 +        }
-         
++
          try {
 -            assert(txContext != null);
              txContext.finish();
          } catch (TransactionFailureException e) {
              this.e = e;
++
              if (e instanceof TransactionConflictException) {
--                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
--                    .setMessage(e.getMessage())
--                    .setRootCause(e)
--                    .build().buildException();
++                throw new SQLExceptionInfo.Builder(
++                        SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
++                        .setMessage(e.getMessage()).setRootCause(e).build()
++                        .buildException();
              }
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
--                .setMessage(e.getMessage())
--                .setRootCause(e)
--                .build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.TRANSACTION_FAILED)
++                    .setMessage(e.getMessage()).setRootCause(e).build()
++                    .buildException();
          }
      }
  
      @Override
      public void abort() throws SQLException {
-         
++
 +        if (txContext == null || !isTransactionRunning()) {
 +            return;
 +        }
-             
++
          try {
              if (e != null) {
                  txContext.abort(e);
@@@ -135,10 -102,10 +144,10 @@@
              }
          } catch (TransactionFailureException e) {
              this.e = null;
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
--                .setMessage(e.getMessage())
--                .setRootCause(e)
--                .build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.TRANSACTION_FAILED)
++                    .setMessage(e.getMessage()).setRootCause(e).build()
++                    .buildException();
          }
      }
  
@@@ -148,8 -115,8 +157,8 @@@
              try {
                  if (txContext == null) {
                      tx = txServiceClient.checkpoint(tx);
--                }  else {
--                    assert(txContext != null);
++                } else {
++                    assert (txContext != null);
                      txContext.checkpoint();
                      tx = txContext.getCurrentTransaction();
                  }
@@@ -158,45 -125,30 +167,44 @@@
              }
          }
  
 +        // Since we're querying our own table while mutating it, we must exclude
-         // see our current mutations, otherwise we can get erroneous results (for DELETE)
++        // see our current mutations, otherwise we can get erroneous results
++        // (for DELETE)
 +        // or get into an infinite loop (for UPSERT SELECT).
          if (txContext == null) {
              tx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
 -        }
 -        else {
 -            assert(txContext != null);
 -            txContext.getCurrentTransaction().setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
++        } else {
++            assert (txContext != null);
++            txContext.getCurrentTransaction().setVisibility(
++                    VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
          }
-         else {
-             assert(txContext != null);
-             txContext.getCurrentTransaction().setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
-         }
-     }
- 
-     private Transaction getCurrentTransaction() {
-         if (this.txContext != null) {
-             return this.txContext.getCurrentTransaction();
-         }
- 
-         return this.tx;
      }
  
      @Override
-     public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
 -    public void commitDDLFence(PTable dataTable) throws SQLException,
 -            InterruptedException, TimeoutException {
++    public void commitDDLFence(PTable dataTable, Logger logger)
++            throws SQLException {
          byte[] key = dataTable.getName().getBytes();
 +
          try {
--            FenceWait fenceWait = VisibilityFence.prepareWait(key, txServiceClient);
++            FenceWait fenceWait = VisibilityFence.prepareWait(key,
++                    txServiceClient);
              fenceWait.await(10000, TimeUnit.MILLISECONDS);
-             
++
 +            if (logger.isInfoEnabled()) {
-                 logger.info("Added write fence at ~" + getCurrentTransaction().getReadPointer());
++                logger.info("Added write fence at ~"
++                        + getCurrentTransaction().getReadPointer());
 +            }
          } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e)
++                    .build().buildException();
          } catch (TimeoutException | TransactionFailureException e) {
--            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
--            .setSchemaName(dataTable.getSchemaName().getString())
--            .setTableName(dataTable.getTableName().getString())
--            .build().buildException();
++            throw new SQLExceptionInfo.Builder(
++                    SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
++                    .setSchemaName(dataTable.getSchemaName().getString())
++                    .setTableName(dataTable.getTableName().getString()).build()
++                    .buildException();
          }
      }
  
@@@ -210,10 -161,9 +218,11 @@@
          } else {
              this.txContext.addTransactionAware(logicalTxAware);
          }
 +
          byte[] physicalKey = table.getPhysicalName().getBytes();
          if (Bytes.compareTo(physicalKey, logicalKey) != 0) {
--            TransactionAware physicalTxAware = VisibilityFence.create(physicalKey);
++            TransactionAware physicalTxAware = VisibilityFence
++                    .create(physicalKey);
              if (this.txContext == null) {
                  this.txAwares.add(physicalTxAware);
              } else {
@@@ -224,9 -174,11 +233,9 @@@
  
      @Override
      public void join(PhoenixTransactionContext ctx) {
--        assert(ctx instanceof TephraTransactionContext);
++        assert (ctx instanceof TephraTransactionContext);
          TephraTransactionContext tephraContext = (TephraTransactionContext) ctx;
  
 -        tephraContext.getAwares();
 -
          if (txContext != null) {
              for (TransactionAware txAware : tephraContext.getAwares()) {
                  txContext.addTransactionAware(txAware);
@@@ -236,152 -188,57 +245,129 @@@
          }
      }
  
++    private Transaction getCurrentTransaction() {
++        return tx != null ? tx : txContext != null ? txContext.getCurrentTransaction() : null;
++    }
++
      @Override
      public boolean isTransactionRunning() {
--        if (this.txContext != null) {
--            return (this.txContext.getCurrentTransaction() != null);
--        }
--
--        if (this.tx != null) {
--            return true;
--        }
--
--        return false;
++        return getCurrentTransaction() != null;
      }
  
      @Override
      public void reset() {
          tx = null;
          txAwares.clear();
++        this.e = null;
      }
  
      @Override
      public long getTransactionId() {
--        if (this.txContext != null) {
--            return txContext.getCurrentTransaction().getTransactionId();
-         }
- 
-         if (tx != null) {
-             return tx.getTransactionId();
-         }
- 
-         return HConstants.LATEST_TIMESTAMP;
++        Transaction tx = getCurrentTransaction();
++        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getTransactionId(); // First write pointer - won't change with checkpointing
 +    }
 +
 +    @Override
 +    public long getReadPointer() {
-         if (this.txContext != null) {
-             return txContext.getCurrentTransaction().getReadPointer();
-         }
++        Transaction tx = getCurrentTransaction();
 +
-         if (tx != null) {
-             return tx.getReadPointer();
++        if (tx == null) {
++            return (-1);
          }
  
-         return (-1);
 -        if (tx != null) {
 -            return tx.getTransactionId();
++        return tx.getReadPointer();
 +    }
 +
 +    // For testing
 +    @Override
 +    public long getWritePointer() {
-         if (this.txContext != null) {
-             return txContext.getCurrentTransaction().getWritePointer();
-         }
- 
-         if (tx != null) {
-             return tx.getWritePointer();
-         }
- 
-         return HConstants.LATEST_TIMESTAMP;
++        Transaction tx = getCurrentTransaction();
++        return tx == null ? HConstants.LATEST_TIMESTAMP : tx.getWritePointer();
 +    }
 +
 +    @Override
 +    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
 +        VisibilityLevel tephraVisibilityLevel = null;
 +
-         switch(visibilityLevel) {
++        switch (visibilityLevel) {
 +        case SNAPSHOT:
 +            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT;
 +            break;
 +        case SNAPSHOT_EXCLUDE_CURRENT:
 +            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
 +            break;
 +        case SNAPSHOT_ALL:
++            System.out.println("OHAD Move to SNAPSHOT_ALL ");
++            System.out.flush();
 +            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL;
 +            break;
 +        default:
-             assert(false);               
++            assert (false);
          }
  
-         if (this.txContext != null) {
-             txContext.getCurrentTransaction().setVisibility(tephraVisibilityLevel);
-         } else if (tx != null) {
-             tx.setVisibility(tephraVisibilityLevel);
-         } else {
-             assert(false);
-         }
 -        return HConstants.LATEST_TIMESTAMP;
++        Transaction tx = getCurrentTransaction();
++        assert(tx != null);
++        tx.setVisibility(tephraVisibilityLevel);
      }
-     
-     // For testing
+ 
      @Override
 -    public long getReadPointer() {
 -        if (this.txContext != null) {
 -            return txContext.getCurrentTransaction().getReadPointer();
 +    public PhoenixVisibilityLevel getVisibilityLevel() {
 +        VisibilityLevel visibilityLevel = null;
 +
-         if (this.txContext != null) {
-             visibilityLevel = txContext.getCurrentTransaction().getVisibilityLevel();
-         } else if (tx != null) {
-             visibilityLevel = tx.getVisibilityLevel();
-         }
++        Transaction tx = getCurrentTransaction();
++        assert(tx != null);
++        visibilityLevel = tx.getVisibilityLevel();
 +
 +        PhoenixVisibilityLevel phoenixVisibilityLevel;
-         switch(visibilityLevel) {
++        switch (visibilityLevel) {
 +        case SNAPSHOT:
 +            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT;
 +            break;
 +        case SNAPSHOT_EXCLUDE_CURRENT:
 +            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
 +            break;
 +        case SNAPSHOT_ALL:
 +            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL;
 +        default:
 +            phoenixVisibilityLevel = null;
          }
  
 -        if (tx != null) {
 -            return tx.getReadPointer();
 +        return phoenixVisibilityLevel;
 +    }
 +
 +    @Override
 +    public byte[] encodeTransaction() throws SQLException {
- 
-         Transaction transaction = null;
- 
-         if (this.txContext != null) {
-             transaction = txContext.getCurrentTransaction();
-         } else if (tx != null) {
-             transaction =  tx;
-         }
- 
-         assert (transaction != null);
++        Transaction tx = getCurrentTransaction();
++        assert (tx != null);
 +
 +        try {
-             return CODEC.encode(transaction);
++            return CODEC.encode(tx);
 +        } catch (IOException e) {
 +            throw new SQLException(e);
          }
 +    }
-     
++
 +    @Override
 +    public long getMaxTransactionsPerSecond() {
 +        return TxConstants.MAX_TX_PER_MS;
 +    }
 +
++    @Override
++    public boolean isPreExistingVersion(long version) {
++        return TxUtils.isPreExistingVersion(version);
++    }
+ 
 -        return (-1);
++    @Override
++    public BaseRegionObserver getCoProcessor() {
++        return new TransactionProcessor();
+     }
  
 -   /**
 -    * TephraTransactionContext specific functions
 -    */
 +    /**
-     * TephraTransactionContext specific functions
-     */
++     * TephraTransactionContext specific functions
++     */
  
      Transaction getTransaction() {
--        return this.tx;
++        return this.getCurrentTransaction();
      }
  
      TransactionContext getContext() {
@@@ -397,8 -254,32 +383,8 @@@
              txContext.addTransactionAware(txAware);
          } else if (this.tx != null) {
              txAwares.add(txAware);
-             assert(tx != null);
++            assert (tx != null);
 +            txAware.startTx(tx);
          }
      }
 -
 -    // For testing
 -    public long getWritePointer() {
 -        if (this.txContext != null) {
 -            return txContext.getCurrentTransaction().getWritePointer();
 -        }
 -
 -        if (tx != null) {
 -            return tx.getWritePointer();
 -        }
 -
 -        return HConstants.LATEST_TIMESTAMP;
 -    }
 -
 -    // For testing
 -    public VisibilityLevel getVisibilityLevel() {
 -        if (this.txContext != null) {
 -            return txContext.getCurrentTransaction().getVisibilityLevel();
 -        }
 -
 -        if (tx != null) {
 -            return tx.getVisibilityLevel();
 -        }
 -
 -        return null;
 -    }
  }


[35/46] phoenix git commit: remove additional tephra stuff

Posted by td...@apache.org.
 remove additional tephra stuff


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f090dd24
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f090dd24
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f090dd24

Branch: refs/heads/omid
Commit: f090dd24e403ae8656f5041cd86cb09eb7ad68ff
Parents: f5b19f1
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Sun May 7 15:48:07 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Sun May 7 15:48:07 2017 +0300

----------------------------------------------------------------------
 .../phoenix/tx/FlappingTransactionIT.java       |  8 +--
 .../phoenix/tx/ParameterizedTransactionIT.java  |  4 +-
 .../org/apache/phoenix/tx/TransactionIT.java    |  1 -
 .../apache/phoenix/index/IndexMaintainer.java   |  4 +-
 .../phoenix/query/ConnectionQueryServices.java  |  2 -
 .../query/ConnectionQueryServicesImpl.java      | 62 ++++++-------------
 .../query/ConnectionlessQueryServicesImpl.java  | 15 +----
 .../query/DelegateConnectionQueryServices.java  |  6 --
 .../org/apache/phoenix/schema/PTableImpl.java   |  6 +-
 .../transaction/OmidTransactionContext.java     | 25 ++++++++
 .../transaction/PhoenixTransactionContext.java  | 26 ++++++++
 .../transaction/TephraTransactionContext.java   | 64 +++++++++++++++++---
 12 files changed, 136 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
index 0bc7c24..97b5e71 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
@@ -47,7 +47,6 @@ import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.apache.tephra.TransactionSystemClient;
 import org.junit.Test;
 
 /**
@@ -213,8 +212,6 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         String fullTableName = generateUniqueName();
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         
-        TransactionSystemClient txServiceClient = pconn.getQueryServices().getTransactionSystemClient();
-
         Statement stmt = conn.createStatement();
         stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
         HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
@@ -227,7 +224,6 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
             assertEquals(1,rs.getInt(1));
         }
 
-        // Use HBase level Tephra APIs to start a new transaction
         //TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
         PhoenixTransactionContext txContext = TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
         PhoenixTransactionalTable txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
@@ -260,7 +256,7 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         assertTrue(rs.next());
         assertEquals(3,rs.getInt(1));
         
-        // Use Tephra APIs directly to finish (i.e. commit) the transaction
+        // Use TM APIs directly to finish (i.e. commit) the transaction
         txContext.commit();
         
         // Confirm that attempt to commit row with conflict fails
@@ -306,7 +302,7 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         assertTrue(rs.next());
         assertEquals(4,rs.getInt(1));
 
-        // Use Tephra APIs directly to abort (i.e. rollback) the transaction
+        // Use TM APIs directly to abort (i.e. rollback) the transaction
         txContext.abort();
         
         rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index badf39b..fcb463c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -53,10 +53,10 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.apache.tephra.TxConstants;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -391,7 +391,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         admin.createTable(desc);
         ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
         conn.createStatement().execute(ddl);
-        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(TxConstants.READ_NON_TX_DATA));
+        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(PhoenixTransactionContext.READ_NON_TX_DATA));
         
         // Should be ok, as HBase metadata should match existing metadata.
         ddl = "CREATE TABLE IF NOT EXISTS " + t1 + " (k varchar primary key)"; 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 78c510b..46cc953 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -47,7 +47,6 @@ import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.apache.tephra.TxConstants;
 import org.junit.Test;
 
 public class TransactionIT  extends ParallelStatsDisabledIT {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 2224e38..19ba609 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -100,6 +100,7 @@ import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.tuple.BaseTuple;
 import org.apache.phoenix.schema.tuple.ValueGetterTuple;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.BitSet;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EncodedColumnsUtil;
@@ -108,7 +109,6 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
-import org.apache.tephra.TxConstants;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
@@ -1064,7 +1064,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             }
         	else if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()
         			// Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor
-        			|| (CellUtil.matchingQualifier(kv, TxConstants.FAMILY_DELETE_QUALIFIER) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
+        			|| (CellUtil.matchingQualifier(kv, TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker()) && CellUtil.matchingValue(kv, HConstants.EMPTY_BYTE_ARRAY))) {
         	    nDeleteCF++;
         	}
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 38580e4..45ab5fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.schema.SequenceAllocation;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.stats.GuidePostsInfo;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
-import org.apache.tephra.TransactionSystemClient;
 
 
 public interface ConnectionQueryServices extends QueryServices, MetaDataMutated {
@@ -132,7 +131,6 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     public long clearCache() throws SQLException;
     public int getSequenceSaltBuckets();
 
-    TransactionSystemClient getTransactionSystemClient();
     public long getRenewLeaseThresholdMilliSeconds();
     public boolean isRenewingLeasesEnabled();
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 59252ad..815f669 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -219,6 +219,8 @@ import org.apache.phoenix.schema.types.PTinyint;
 import org.apache.phoenix.schema.types.PUnsignedTinyint;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.ConfigUtil;
@@ -232,11 +234,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.UpgradeUtil;
-import org.apache.tephra.TransactionSystemClient;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.distributed.PooledClientProvider;
-import org.apache.tephra.distributed.TransactionServiceClient;
-import org.apache.tephra.zookeeper.TephraZKClientService;
 import org.apache.twill.discovery.ZKDiscoveryService;
 import org.apache.twill.zookeeper.RetryStrategies;
 import org.apache.twill.zookeeper.ZKClientService;
@@ -287,7 +284,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     private HConnection connection;
     private ZKClientService txZKClientService;
-    private TransactionServiceClient txServiceClient;
     private volatile boolean initialized;
     private volatile int nSequenceSaltBuckets;
 
@@ -396,32 +392,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     }
 
-    @Override
-    public TransactionSystemClient getTransactionSystemClient() {
-        return txServiceClient;
-    }
-
     private void initTxServiceClient() {
-        String zkQuorumServersString = this.getProps().get(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM);
-        if (zkQuorumServersString==null) {
-            zkQuorumServersString = connectionInfo.getZookeeperQuorum()+":"+connectionInfo.getPort();
-        }
-
-        int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
-        // Create instance of the tephra zookeeper client
-        txZKClientService = ZKClientServices.delegate(
-            ZKClients.reWatchOnExpire(
-                ZKClients.retryOnFailure(
-                     new TephraZKClientService(zkQuorumServersString, timeOut, null,
-                             ArrayListMultimap.<String, byte[]>create()), 
-                         RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-                     )
-                );
-        txZKClientService.startAndWait();
-        ZKDiscoveryService zkDiscoveryService = new ZKDiscoveryService(txZKClientService);
-        PooledClientProvider pooledClientProvider = new PooledClientProvider(
-                config, zkDiscoveryService);
-        this.txServiceClient = new TransactionServiceClient(config,pooledClientProvider);
+        txZKClientService = TransactionFactory.getTransactionFactory().getTransactionContext().setTransactionClient(config, props, connectionInfo);
     }
 
     private void openConnection() throws SQLException {
@@ -862,7 +834,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             }
             boolean isTransactional =
                     Boolean.TRUE.equals(tableProps.get(TableProperty.TRANSACTIONAL.name())) ||
-                    Boolean.TRUE.equals(tableProps.get(TxConstants.READ_NON_TX_DATA)); // For ALTER TABLE
+                    Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA)); // For ALTER TABLE
             // TODO: better encapsulation for this
             // Since indexes can't have indexes, don't install our indexing coprocessor for indexes.
             // Also don't install on the SYSTEM.CATALOG and SYSTEM.STATS table because we use
@@ -1125,7 +1097,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // If mapping an existing table as transactional, set property so that existing
                 // data is correctly read.
                 if (willBeTx) {
-                    newDesc.setValue(TxConstants.READ_NON_TX_DATA, Boolean.TRUE.toString());
+                    newDesc.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE.toString());
                 } else {
                     // If we think we're creating a non transactional table when it's already
                     // transactional, don't allow.
@@ -1134,7 +1106,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         .setSchemaName(SchemaUtil.getSchemaNameFromFullName(tableName))
                         .setTableName(SchemaUtil.getTableNameFromFullName(tableName)).build().buildException();
                     }
-                    newDesc.remove(TxConstants.READ_NON_TX_DATA);
+                    newDesc.remove(PhoenixTransactionContext.READ_NON_TX_DATA);
                 }
                 if (existingDesc.equals(newDesc)) {
                     return null; // Indicate that no metadata was changed
@@ -1754,7 +1726,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             origTableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size());
             tableDescriptors.add(tableDescriptor);
             origTableDescriptors.add(origTableDescriptor);
-            nonTxToTx = Boolean.TRUE.equals(tableProps.get(TxConstants.READ_NON_TX_DATA));
+            nonTxToTx = Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA));
             /*
              * If the table was transitioned from non transactional to transactional, we need
              * to also transition the index tables.
@@ -1864,7 +1836,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 indexTableProps = Collections.<String,Object>emptyMap();
             } else {
                 indexTableProps = Maps.newHashMapWithExpectedSize(1);
-                indexTableProps.put(TxConstants.READ_NON_TX_DATA, Boolean.valueOf(txValue));
+                indexTableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.valueOf(txValue));
             }
             for (PTable index : table.getIndexes()) {
                 HTableDescriptor indexDescriptor = admin.getTableDescriptor(index.getPhysicalName().getBytes());
@@ -1877,7 +1849,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(indexFamilyName);
                     HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(dataFamilyName);
                     indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
-                    indexColDescriptor.setValue(TxConstants.PROPERTY_TTL, tableColDescriptor.getValue(TxConstants.PROPERTY_TTL));
+                    indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
                 } else {
                     for (PColumnFamily family : index.getColumnFamilies()) {
                         byte[] familyName = family.getName().getBytes();
@@ -1885,7 +1857,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(familyName);
                         HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
                         indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
-                        indexColDescriptor.setValue(TxConstants.PROPERTY_TTL, tableColDescriptor.getValue(TxConstants.PROPERTY_TTL));
+                        indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
                     }
                 }
                 setTransactional(indexDescriptor, index.getType(), txValue, indexTableProps);
@@ -1921,7 +1893,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(familyName);
             HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
             indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
-            indexColDescriptor.setValue(TxConstants.PROPERTY_TTL, tableColDescriptor.getValue(TxConstants.PROPERTY_TTL));
+            indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
         } else {
             for (PColumnFamily family : table.getColumnFamilies()) {
                 byte[] familyName = family.getName().getBytes();
@@ -1929,7 +1901,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 if (indexColDescriptor != null) {
                     HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
                     indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
-                    indexColDescriptor.setValue(TxConstants.PROPERTY_TTL, tableColDescriptor.getValue(TxConstants.PROPERTY_TTL));
+                    indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
                 }
             }
         }
@@ -1957,9 +1929,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
     private void setTransactional(HTableDescriptor tableDescriptor, PTableType tableType, String txValue, Map<String, Object> tableProps) throws SQLException {
         if (txValue == null) {
-            tableDescriptor.remove(TxConstants.READ_NON_TX_DATA);
+            tableDescriptor.remove(PhoenixTransactionContext.READ_NON_TX_DATA);
         } else {
-            tableDescriptor.setValue(TxConstants.READ_NON_TX_DATA, txValue);
+            tableDescriptor.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, txValue);
         }
         this.addCoprocessors(tableDescriptor.getName(), tableDescriptor, tableType, tableProps);
     }
@@ -2005,7 +1977,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 commonFamilyProps.put(propName, prop.getSecond());
                             } else if (propName.equals(PhoenixDatabaseMetaData.TRANSACTIONAL) && Boolean.TRUE.equals(propValue)) {
                                 willBeTransactional = isOrWillBeTransactional = true;
-                                tableProps.put(TxConstants.READ_NON_TX_DATA, propValue);
+                                tableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, propValue);
                             }
                         } else {
                             if (MetaDataUtil.isHColumnProperty(propName)) {
@@ -2172,10 +2144,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         if (props == null) {
                             props = new HashMap<String, Object>();
                         }
-                        props.put(TxConstants.PROPERTY_TTL, ttl);
+                        props.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
                         // Remove HBase TTL if we're not transitioning an existing table to become transactional
                         // or if the existing transactional table wasn't originally non transactional.
-                        if (!willBeTransactional && !Boolean.valueOf(newTableDescriptor.getValue(TxConstants.READ_NON_TX_DATA))) {
+                        if (!willBeTransactional && !Boolean.valueOf(newTableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA))) {
                             props.remove(TTL);
                         }
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 47ef954..8e72e74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -80,15 +80,13 @@ import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.stats.GuidePostsInfo;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.SequenceUtil;
-import org.apache.tephra.TransactionManager;
-import org.apache.tephra.TransactionSystemClient;
-import org.apache.tephra.inmemory.InMemoryTxSystemClient;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -107,7 +105,6 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     private PMetaData metaData;
     private final Map<SequenceKey, SequenceInfo> sequenceMap = Maps.newHashMap();
     private final String userName;
-    private final TransactionSystemClient txSystemClient;
     private KeyValueBuilder kvBuilder;
     private volatile boolean initialized;
     private volatile SQLException initializationException;
@@ -119,7 +116,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
         super(services);
         userName = connInfo.getPrincipal();
         metaData = newEmptyMetaData();
-        
+
         // Use KeyValueBuilder that builds real KeyValues, as our test utils require this
         this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
         Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
@@ -138,8 +135,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
         // Without making a copy of the configuration we cons up, we lose some of our properties
         // on the server side during testing.
         this.config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config);
-        TransactionManager txnManager = new TransactionManager(config);
-        this.txSystemClient = new InMemoryTxSystemClient(txnManager);
+        TransactionFactory.getTransactionFactory().getTransactionContext().setInMemoryTransactionClient(config);
         this.guidePostsCache = new GuidePostsCache(this, config);
     }
 
@@ -531,11 +527,6 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
                 QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
     }
 
-    @Override
-    public TransactionSystemClient getTransactionSystemClient() {
-        return txSystemClient;
-    }
- 
     public MetaDataMutationResult createFunction(List<Mutation> functionData, PFunction function, boolean temporary)
             throws SQLException {
         return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0l, null);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 7f7c027..6c464eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -47,7 +47,6 @@ import org.apache.phoenix.schema.SequenceAllocation;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.stats.GuidePostsInfo;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
-import org.apache.tephra.TransactionSystemClient;
 
 
 public class DelegateConnectionQueryServices extends DelegateQueryServices implements ConnectionQueryServices {
@@ -257,11 +256,6 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public TransactionSystemClient getTransactionSystemClient() {
-        return getDelegate().getTransactionSystemClient();
-    }
-
-    @Override
     public MetaDataMutationResult createFunction(List<Mutation> functionData, PFunction function, boolean temporary)
             throws SQLException {
         return getDelegate().createFunction(functionData, function, temporary);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index d91ebcb..ab8fe5c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -69,13 +69,13 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDouble;
 import org.apache.phoenix.schema.types.PFloat;
 import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
-import org.apache.tephra.TxConstants;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Preconditions;
@@ -1022,11 +1022,11 @@ public class PTableImpl implements PTable {
             if (PTableImpl.this.isTransactional()) {
                 Put put = new Put(key);
                 if (families.isEmpty()) {
-                    put.add(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), TxConstants.FAMILY_DELETE_QUALIFIER, ts,
+                    put.add(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker(), ts,
                             HConstants.EMPTY_BYTE_ARRAY);
                 } else {
                     for (PColumnFamily colFamily : families) {
-                        put.add(colFamily.getName().getBytes(), TxConstants.FAMILY_DELETE_QUALIFIER, ts,
+                        put.add(colFamily.getName().getBytes(), TransactionFactory.getTransactionFactory().getTransactionContext().get_famility_delete_marker(), ts,
                                 HConstants.EMPTY_BYTE_ARRAY);
                     }
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index d122d0c..cec07d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -3,8 +3,12 @@ package org.apache.phoenix.transaction;
 import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.twill.zookeeper.ZKClientService;
 import org.slf4j.Logger;
 
 public class OmidTransactionContext implements PhoenixTransactionContext {
@@ -116,4 +120,25 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
         // TODO Auto-generated method stub
         return null;
     }
+
+    @Override
+    public void setInMemoryTransactionClient(Configuration config) {
+        // TODO Auto-generated method stub
+        
+    }
+
+    @Override
+    public ZKClientService setTransactionClient(Configuration config, ReadOnlyProps props,
+            ConnectionInfo connectionInfo) {
+        // TODO Auto-generated method stub
+        
+        return null;
+        
+    }
+
+    @Override
+    public byte[] get_famility_delete_marker() {
+        // TODO Auto-generated method stub
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 0854f4e..36f7804 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -1,7 +1,11 @@
 package org.apache.phoenix.transaction;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.twill.zookeeper.ZKClientService;
 import org.slf4j.Logger;
 
 import java.sql.SQLException;
@@ -27,6 +31,22 @@ public interface PhoenixTransactionContext {
     public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing";
 
     /**
+     * Sets the in-memory client connection to the transaction manager (for testing purposes)
+     *
+     * @param config
+     */
+    public void setInMemoryTransactionClient(Configuration config);
+
+    /**
+     * Sets the client connection to the transaction manager
+     *
+     * @param config
+     * @param props
+     * @param connectionInfo
+     * @return the ZooKeeper client service backing the transaction client connection
+    public ZKClientService setTransactionClient(Configuration config, ReadOnlyProps props, ConnectionInfo connectionInfo);
+
+    /**
      * Starts a transaction
      *
      * @throws SQLException
@@ -138,4 +158,10 @@ public interface PhoenixTransactionContext {
      * @return the coprocessor
      */
     public BaseRegionObserver getCoProcessor();
+
+    /**
+     * Returns the marker used to flag a column-family delete in transactional tables.
+     *
+     * @return the family delete marker
+     */
+    public byte[] get_famility_delete_marker(); 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f090dd24/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index a5e6e64..0334826 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -10,12 +10,13 @@ import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.tephra.Transaction;
 import org.apache.tephra.TransactionAware;
 import org.apache.tephra.TransactionCodec;
@@ -26,12 +27,21 @@ import org.apache.tephra.TransactionManager;
 import org.apache.tephra.TransactionSystemClient;
 import org.apache.tephra.Transaction.VisibilityLevel;
 import org.apache.tephra.TxConstants;
+import org.apache.tephra.distributed.PooledClientProvider;
+import org.apache.tephra.distributed.TransactionServiceClient;
 import org.apache.tephra.hbase.coprocessor.TransactionProcessor;
 import org.apache.tephra.inmemory.InMemoryTxSystemClient;
 import org.apache.tephra.util.TxUtils;
 import org.apache.tephra.visibility.FenceWait;
 import org.apache.tephra.visibility.VisibilityFence;
-
+import org.apache.tephra.zookeeper.TephraZKClientService;
+import org.apache.twill.discovery.ZKDiscoveryService;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+
+import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 
 import org.slf4j.Logger;
@@ -40,6 +50,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     private static final TransactionCodec CODEC = new TransactionCodec();
 
+    private static TransactionSystemClient txClient = null;
+
     private final List<TransactionAware> txAwares;
     private final TransactionContext txContext;
     private Transaction tx;
@@ -59,17 +71,16 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     }
 
     public TephraTransactionContext(PhoenixConnection connection) {
-        this.txServiceClient = connection.getQueryServices()
-                .getTransactionSystemClient();
+        assert(txClient != null);
+        this.txServiceClient = txClient;  
         this.txAwares = Collections.emptyList();
         this.txContext = new TransactionContext(txServiceClient);
     }
 
     public TephraTransactionContext(PhoenixTransactionContext ctx,
             PhoenixConnection connection, boolean subTask) {
-        this.txServiceClient = connection.getQueryServices()
-                .getTransactionSystemClient();
-
+        assert(txClient != null);
+        this.txServiceClient = txClient;  
         assert (ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
 
@@ -86,6 +97,38 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
+    public void setInMemoryTransactionClient(Configuration config) {
+        TransactionManager txnManager = new TransactionManager(config);
+        txClient = this.txServiceClient = new InMemoryTxSystemClient(txnManager);
+    }
+
+    @Override
+    public ZKClientService setTransactionClient(Configuration config, ReadOnlyProps props, ConnectionInfo connectionInfo) {
+        String zkQuorumServersString = props.get(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM);
+        if (zkQuorumServersString==null) {
+            zkQuorumServersString = connectionInfo.getZookeeperQuorum()+":"+connectionInfo.getPort();
+        }
+
+        int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
+        // Create instance of the tephra zookeeper client
+        ZKClientService txZKClientService  = ZKClientServices.delegate(
+            ZKClients.reWatchOnExpire(
+                ZKClients.retryOnFailure(
+                     new TephraZKClientService(zkQuorumServersString, timeOut, null,
+                             ArrayListMultimap.<String, byte[]>create()), 
+                         RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
+                     )
+                );
+        txZKClientService.startAndWait();
+        ZKDiscoveryService zkDiscoveryService = new ZKDiscoveryService(txZKClientService);
+        PooledClientProvider pooledClientProvider = new PooledClientProvider(
+                config, zkDiscoveryService);
+        txClient = this.txServiceClient = new TransactionServiceClient(config,pooledClientProvider);
+        
+        return txZKClientService;
+    }
+
+    @Override
     public void begin() throws SQLException {
         if (txContext == null) {
             throw new SQLExceptionInfo.Builder(
@@ -362,6 +405,11 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         return new TransactionProcessor();
     }
 
+    @Override
+    public byte[] get_famility_delete_marker() { 
+        return TxConstants.FAMILY_DELETE_QUALIFIER;
+    }
+
     /**
      * TephraTransactionContext specific functions
      */


[38/46] phoenix git commit: Merge branch 'master' into ConnectTALtoPhoenix

Posted by td...@apache.org.
Merge branch 'master' into ConnectTALtoPhoenix


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f6f8ddad
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f6f8ddad
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f6f8ddad

Branch: refs/heads/omid
Commit: f6f8ddadf644a52f1a315c0f22ed1ace5ccaebb3
Parents: b3a2136 9f38170
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon May 8 14:40:12 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon May 8 14:40:12 2017 +0300

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |   2 +-
 phoenix-client/pom.xml                          |   2 +-
 phoenix-core/pom.xml                            |   2 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |   2 +-
 .../apache/phoenix/end2end/AlterTableIT.java    |   9 +-
 .../org/apache/phoenix/end2end/ArrayIT.java     |   2 +-
 .../phoenix/end2end/AutomaticRebuildIT.java     |   4 +-
 .../phoenix/end2end/CoalesceFunctionIT.java     |  12 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |   2 +-
 .../end2end/IndexToolForPartialBuildIT.java     |   4 +-
 .../phoenix/end2end/NthValueFunctionIT.java     |  99 +++++++
 .../end2end/SequenceBulkAllocationIT.java       |  44 ++--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  44 ++--
 .../phoenix/end2end/SortMergeJoinMoreIT.java    | 135 ++++++++++
 .../phoenix/end2end/SpillableGroupByIT.java     |   2 +-
 .../phoenix/end2end/StatsCollectorIT.java       |  11 +-
 .../phoenix/end2end/StoreNullsPropIT.java       |   2 +-
 .../end2end/SystemTablePermissionsIT.java       | 263 +++++++++++++++++++
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  39 ++-
 .../apache/phoenix/end2end/index/IndexIT.java   |   5 +-
 .../phoenix/end2end/index/IndexMetadataIT.java  |   6 +-
 .../phoenix/end2end/index/IndexTestUtil.java    |   2 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |  47 +++-
 .../org/apache/phoenix/rpc/UpdateCacheIT.java   |   2 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  |   8 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   |  10 +
 .../apache/phoenix/compile/JoinCompiler.java    |   6 +-
 .../phoenix/compile/SubselectRewriter.java      | 137 +++++++++-
 .../apache/phoenix/compile/UpsertCompiler.java  |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java       | 213 +++++++--------
 .../phoenix/coprocessor/MetaDataProtocol.java   |   4 +-
 .../phoenix/exception/SQLExceptionCode.java     |   1 +
 .../apache/phoenix/execute/DelegateHTable.java  |  22 +-
 .../phoenix/execute/DelegateQueryPlan.java      |   4 +
 .../phoenix/execute/SortMergeJoinPlan.java      |   8 +
 .../FirstLastValueServerAggregator.java         |   2 -
 .../org/apache/phoenix/hbase/index/Indexer.java |  66 ++---
 .../write/ParallelWriterIndexCommitter.java     |   8 +-
 .../TrackingParallelWriterIndexCommitter.java   |   8 +-
 .../apache/phoenix/index/IndexMaintainer.java   |   3 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   7 +-
 .../query/ConnectionQueryServicesImpl.java      |  60 ++++-
 .../apache/phoenix/schema/MetaDataClient.java   |  24 +-
 .../java/org/apache/phoenix/schema/PTable.java  |  10 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  15 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  17 ++
 .../org/apache/phoenix/util/RepairUtil.java     |  11 +-
 .../org/apache/phoenix/util/UpgradeUtil.java    |  75 +++++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |   2 +-
 .../phoenix/compile/QueryCompilerTest.java      | 187 +++++++++++++
 .../recovery/TestPerRegionIndexWriteCache.java  |   2 +-
 .../query/ConnectionQueryServicesImplTest.java  |  73 +++++
 phoenix-flume/pom.xml                           |   2 +-
 phoenix-hive/pom.xml                            |   2 +-
 phoenix-kafka/pom.xml                           |   2 +-
 phoenix-pherf/pom.xml                           |   2 +-
 phoenix-pig/pom.xml                             |   2 +-
 phoenix-queryserver-client/pom.xml              |   2 +-
 phoenix-queryserver/pom.xml                     |   2 +-
 phoenix-server/pom.xml                          |   2 +-
 phoenix-spark/pom.xml                           |   2 +-
 phoenix-spark/src/it/resources/globalSetup.sql  |   3 +-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  27 +-
 .../phoenix/spark/DataFrameFunctions.scala      |  19 +-
 .../apache/phoenix/spark/DefaultSource.scala    |   2 +-
 .../org/apache/phoenix/spark/PhoenixRDD.scala   |   4 +-
 phoenix-tracing-webapp/pom.xml                  |   2 +-
 pom.xml                                         |   6 +-
 68 files changed, 1484 insertions(+), 324 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6f8ddad/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6f8ddad/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6f8ddad/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6f8ddad/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6f8ddad/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------


[43/46] phoenix git commit: Merge remote-tracking branch 'upstream/master'

Posted by td...@apache.org.
Merge remote-tracking branch 'upstream/master'


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd84de33
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd84de33
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd84de33

Branch: refs/heads/omid
Commit: cd84de335e14b5298028138a8aca6f64ae09dbc5
Parents: 9f38170 37d0a4a
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue May 9 10:19:06 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue May 9 10:19:06 2017 +0300

----------------------------------------------------------------------
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  |  93 ++++--
 .../apache/phoenix/compile/UpsertCompiler.java  |  14 +-
 .../UngroupedAggregateRegionObserver.java       |  14 +-
 .../hbase/index/covered/data/LocalTable.java    |  22 +-
 .../covered/update/IndexUpdateManager.java      |  12 +-
 .../index/covered/update/SortedCollection.java  | 128 --------
 .../org/apache/phoenix/query/QueryServices.java |   3 +
 .../phoenix/query/QueryServicesOptions.java     |   4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java |  14 +
 .../index/covered/TestNonTxIndexBuilder.java    | 317 +++++++++++++++++++
 .../index/covered/data/TestLocalTable.java      |  63 ++++
 11 files changed, 506 insertions(+), 178 deletions(-)
----------------------------------------------------------------------



[06/46] phoenix git commit: remove spaces

Posted by td...@apache.org.
remove spaces


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c451343d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c451343d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c451343d

Branch: refs/heads/omid
Commit: c451343d9df44e88638487a2c509ae9b0db1664a
Parents: bcffce9
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Sun Feb 19 10:04:38 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Sun Feb 19 10:04:38 2017 +0200

----------------------------------------------------------------------
 .../phoenix/transaction/TephraTransactionContext.java   | 12 ++++++------
 .../phoenix/transaction/TephraTransactionTable.java     |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c451343d/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 9c7337d..8fc5e0f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -78,7 +78,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             txContext.finish();
         } catch (TransactionFailureException e) {
             this.e = e;
-            if (e instanceof TransactionConflictException) { 
+            if (e instanceof TransactionConflictException) {
                 throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
                     .setMessage(e.getMessage())
                     .setRootCause(e)
@@ -124,7 +124,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
                 throw new SQLException(e);
             }
         }
-  
+
         if (txContext == null) {
             tx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
         }
@@ -211,7 +211,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     public long getTransactionId() {
         if (this.txContext != null) {
             return txContext.getCurrentTransaction().getTransactionId();
-        } 
+        }
 
         if (tx != null) {
             return tx.getTransactionId();
@@ -224,7 +224,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     public long getReadPointer() {
         if (this.txContext != null) {
             return txContext.getCurrentTransaction().getReadPointer();
-        } 
+        }
 
         if (tx != null) {
             return tx.getReadPointer();
@@ -261,7 +261,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     public long getWritePointer() {
         if (this.txContext != null) {
             return txContext.getCurrentTransaction().getWritePointer();
-        } 
+        }
 
         if (tx != null) {
             return tx.getWritePointer();
@@ -274,7 +274,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     public VisibilityLevel getVisibilityLevel() {
         if (this.txContext != null) {
             return txContext.getCurrentTransaction().getVisibilityLevel();
-        } 
+        }
 
         if (tx != null) {
             return tx.getVisibilityLevel();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c451343d/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index c5ba33f..0823f89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -17,9 +17,9 @@ import org.apache.tephra.hbase.TransactionAwareHTable;
 public class TephraTransactionTable implements PhoenixTransactionalTable {
 
     private TransactionAwareHTable transactionAwareHTable;
-    
+
     private TephraTransactionContext tephraTransactionContext;
-    
+
     public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
 
         assert(ctx instanceof TephraTransactionContext);


[42/46] phoenix git commit: PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

Posted by td...@apache.org.
PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

Adds phoenix.client.enable.server.upsert.select property that is true
(enabled) by default. This acts as a feature toggle for PHOENIX-3271.

Signed-off-by: Andrew Purtell <ap...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/37d0a4a0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/37d0a4a0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/37d0a4a0

Branch: refs/heads/omid
Commit: 37d0a4a038c1f843db2a1d68cfc3b3cfa8c8d537
Parents: 85e344f
Author: Alex Araujo <al...@gmail.com>
Authored: Mon May 1 20:27:18 2017 -0500
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon May 8 17:34:33 2017 -0700

----------------------------------------------------------------------
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 93 ++++++++++++++------
 .../apache/phoenix/compile/UpsertCompiler.java  | 14 ++-
 .../UngroupedAggregateRegionObserver.java       | 14 +--
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java     |  4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java | 14 +++
 6 files changed, 97 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 410f02c..b9e4fff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -45,11 +45,12 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -67,14 +68,14 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
     	Map<String, String> serverProps = Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, 
         		TestPhoenixIndexRpcSchedulerFactory.class.getName());
         // use the standard rpc controller for client rpc, so that we can isolate server rpc and ensure they use the correct queue  
-    	Map<String, String> clientProps = Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
-    			RpcControllerFactory.class.getName());      
+    	Map<String, String> clientProps = Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+    			RpcControllerFactory.class.getName());
         NUM_SLAVES_BASE = 2;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
     }
     
-    @AfterClass
-    public static void cleanUpAfterTestSuite() throws Exception {
+    @After
+    public void cleanUpAfterTest() throws Exception {
         TestPhoenixIndexRpcSchedulerFactory.reset();
     }
     
@@ -91,26 +92,19 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = driver.connect(getUrl(), props);
         try {
-            // create the table 
-            conn.createStatement().execute(
-                    "CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+            // create the table
+            createTable(conn, dataTableFullName);
     
-            // create the index 
-            conn.createStatement().execute(
-                    "CREATE INDEX " + indexName + " ON " + dataTableFullName + " (v1) INCLUDE (v2)");
+            // create the index
+            createIndex(conn, indexName);
 
             ensureTablesOnDifferentRegionServers(dataTableFullName, indexTableFullName);
     
-            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
-            stmt.setString(1, "k1");
-            stmt.setString(2, "v1");
-            stmt.setString(3, "v2");
-            stmt.execute();
-            conn.commit();
+            upsertRow(conn, dataTableFullName);
     
             // run select query that should use the index
             String selectSql = "SELECT k, v2 from " + dataTableFullName + " WHERE v1=?";
-            stmt = conn.prepareStatement(selectSql);
+            PreparedStatement stmt = conn.prepareStatement(selectSql);
             stmt.setString(1, "v1");
     
             // verify that the query does a range scan on the index table
@@ -127,17 +121,11 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             // drop index table 
             conn.createStatement().execute(
                     "DROP INDEX " + indexName + " ON " + dataTableFullName );
-            // create a data table with the same name as the index table 
-            conn.createStatement().execute(
-                    "CREATE TABLE " + indexTableFullName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+            // create a data table with the same name as the index table
+            createTable(conn, indexTableFullName);
             
             // upsert one row to the table (which has the same table name as the previous index table)
-            stmt = conn.prepareStatement("UPSERT INTO " + indexTableFullName + " VALUES(?,?,?)");
-            stmt.setString(1, "k1");
-            stmt.setString(2, "v1");
-            stmt.setString(3, "v2");
-            stmt.execute();
-            conn.commit();
+            upsertRow(conn, indexTableFullName);
             
             // run select query on the new table
             selectSql = "SELECT k, v2 from " + indexTableFullName + " WHERE v1=?";
@@ -155,8 +143,7 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
             Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
             
             TestPhoenixIndexRpcSchedulerFactory.reset();
-            conn.createStatement().execute(
-                    "CREATE INDEX " + indexName + "_1 ON " + dataTableFullName + " (v1) INCLUDE (v2)");
+            createIndex(conn, indexName + "_1");
             // verify that that index queue is used and only once (during Upsert Select on server to build the index)
             Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
         }
@@ -165,6 +152,54 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
         }
     }
 
+    @Test
+    public void testUpsertSelectServerDisabled() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        // disable server side upsert select
+        props.setProperty(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "false");
+        try (Connection conn = driver.connect(getUrl(), props)) {
+            // create two tables with identical schemas
+            createTable(conn, dataTableFullName);
+            upsertRow(conn, dataTableFullName);
+            String tableName2 = dataTableFullName + "_2";
+            createTable(conn, tableName2);
+            ensureTablesOnDifferentRegionServers(dataTableFullName, tableName2);
+            // copy the row from the first table using upsert select
+            upsertSelectRows(conn, dataTableFullName, tableName2);
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(),
+                    Mockito.never()).dispatch(Mockito.any(CallRunner.class));
+
+        }
+    }
+
+    private void createTable(Connection conn, String tableName) throws SQLException {
+        conn.createStatement().execute(
+                "CREATE TABLE " + tableName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+    }
+
+    private void createIndex(Connection conn, String indexName) throws SQLException {
+        conn.createStatement().execute(
+                "CREATE INDEX " + indexName + " ON " + dataTableFullName + " (v1) INCLUDE (v2)");
+    }
+
+    private void upsertRow(Connection conn, String tableName) throws SQLException {
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
+        stmt.setString(1, "k1");
+        stmt.setString(2, "v1");
+        stmt.setString(3, "v2");
+        stmt.execute();
+        conn.commit();
+    }
+
+    private void upsertSelectRows(Connection conn, String tableName1, String tableName2) throws SQLException {
+        PreparedStatement stmt =
+                conn.prepareStatement(
+                        "UPSERT INTO " + tableName2 + " (k, v1, v2) SELECT k, v1, v2 FROM "
+                                + tableName1);
+        stmt.execute();
+        conn.commit();
+    }
+
 	/**
 	 * Verifies that the given tables each have a single region and are on
 	 * different region servers. If they are on the same server moves tableName2

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 5559ad7..bbbd483 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -105,6 +105,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -335,6 +336,9 @@ public class UpsertCompiler {
         int nValuesToSet;
         boolean sameTable = false;
         boolean runOnServer = false;
+        boolean serverUpsertSelectEnabled =
+                services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT,
+                        QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT);
         UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
         // Retry once if auto commit is off, as the meta data may
         // be out of date. We do not retry if auto commit is on, as we
@@ -505,7 +509,7 @@ public class UpsertCompiler {
                         && tableRefToBe.equals(selectResolver.getTables().get(0));
                     tableRefToBe = adjustTimestampToMinOfSameTable(tableRefToBe, selectResolver.getTables());
                     /* We can run the upsert in a coprocessor if:
-                     * 1) from has only 1 table
+                     * 1) from has only 1 table or server UPSERT SELECT is enabled
                      * 2) the select query isn't doing aggregation (which requires a client-side final merge)
                      * 3) autoCommit is on
                      * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
@@ -523,7 +527,7 @@ public class UpsertCompiler {
                         // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query,
                         // so we might be able to run it entirely on the server side.
                         // region space managed by region servers. So we bail out on executing on server side.
-                        runOnServer = isAutoCommit && !table.isTransactional()
+                        runOnServer = (sameTable || serverUpsertSelectEnabled) && isAutoCommit && !table.isTransactional()
                                 && !(table.isImmutableRows() && !table.getIndexes().isEmpty())
                                 && !select.isJoin() && table.getRowTimestampColPos() == -1;
                     }
@@ -666,7 +670,11 @@ public class UpsertCompiler {
                     reverseColumnIndexes[tempPos] = pos;
                     reverseColumnIndexes[i] = i;
                 }
-
+                // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server
+                if (!serverUpsertSelectEnabled && ExpressionUtil
+                        .isPkPositionChanging(new TableRef(table), projectedExpressions)) {
+                    runOnServer = false;
+                }
                 ////////////////////////////////////////////////////////////////////
                 // UPSERT SELECT run server-side
                 /////////////////////////////////////////////////////////////////////

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2dec235..49ef884 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -112,6 +112,7 @@ import org.apache.phoenix.schema.types.PFloat;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EncodedColumnsUtil;
+import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -398,7 +399,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             values = new byte[projectedTable.getPKColumns().size()][];
             areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(),
                     region.getTableDesc().getTableName().getName()) == 0
-                    && !isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
+                    && !ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
             
         } else {
             byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
@@ -792,17 +793,6 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         }
     }
 
-    private boolean isPkPositionChanging(TableRef tableRef, List<Expression> projectedExpressions) throws SQLException {
-        // If the row ends up living in a different region, we'll get an error otherwise.
-        for (int i = 0; i < tableRef.getTable().getPKColumns().size(); i++) {
-            PColumn column = tableRef.getTable().getPKColumns().get(i);
-            Expression source = projectedExpressions.get(i);
-            if (source == null || !source
-                    .equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) { return true; }
-        }
-        return false;
-    }
-
     private boolean readyToCommit(MutationList mutations, int maxBatchSize, long maxBatchSizeBytes) {
         return !mutations.isEmpty() && (maxBatchSize > 0 && mutations.size() > maxBatchSize)
                 || (maxBatchSizeBytes > 0 && mutations.heapSize() > maxBatchSizeBytes);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 0b7b737..c01d11f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -248,6 +248,9 @@ public interface QueryServices extends SQLCloseable {
     public static final String DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB  = "phoenix.default.immutable.storage.scheme";
     public static final String DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB  = "phoenix.default.multitenant.immutable.storage.scheme";
 
+    // whether to enable server side RS -> RS calls for upsert select statements
+    public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select";
+
     /**
      * Get executor service used for parallel scans
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 4fd1344..1ddf7eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -290,7 +290,9 @@ public class QueryServicesOptions {
                                                                                     // 4.10, psql and CSVBulkLoad
                                                                                     // expects binary data to be base 64
                                                                                     // encoded
-    
+    // RS -> RS calls for upsert select statements are enabled by default
+    public static final boolean DEFAULT_ENABLE_SERVER_UPSERT_SELECT = true;
+
     private final Configuration config;
 
     private QueryServicesOptions(Configuration config) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37d0a4a0/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
index 1fbb534..fbd10fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java
@@ -10,11 +10,15 @@
 package org.apache.phoenix.util;
 
 import java.sql.SQLException;
+import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.schema.ColumnRef;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 
@@ -54,4 +58,14 @@ public class ExpressionUtil {
         return false;
     }
 
+    public static boolean isPkPositionChanging(TableRef tableRef, List<Expression> projectedExpressions) throws SQLException {
+        for (int i = 0; i < tableRef.getTable().getPKColumns().size(); i++) {
+            PColumn column = tableRef.getTable().getPKColumns().get(i);
+            Expression source = projectedExpressions.get(i);
+            if (source == null || !source
+                    .equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) { return true; }
+        }
+        return false;
+    }
+
 }


[13/46] phoenix git commit: PHOENIX-3476: prevent loss of offset when sub-aggregating (Matthew Silverman)

Posted by td...@apache.org.
PHOENIX-3476: prevent loss of offset when sub-aggregating (Matthew Silverman)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2074d1f0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2074d1f0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2074d1f0

Branch: refs/heads/omid
Commit: 2074d1f0a2dd2b03c2e3588ffd4d5f2395cc7505
Parents: 7050b92
Author: James Taylor <ja...@apache.org>
Authored: Tue Mar 21 15:48:36 2017 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Tue Mar 21 15:48:36 2017 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/NthValueFunctionIT.java     | 34 ++++++++++++++++++++
 .../FirstLastValueServerAggregator.java         |  2 --
 2 files changed, 34 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2074d1f0/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
index 1278e26..ff0f094 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
@@ -128,6 +128,40 @@ public class NthValueFunctionIT extends ParallelStatsDisabledIT {
     }
 
     @Test
+    public void offsetValueSubAggregation() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String nth_test_table = generateUniqueName();
+        String ddl = "CREATE TABLE IF NOT EXISTS " + nth_test_table + " "
+                + "(id INTEGER NOT NULL PRIMARY KEY, page_id UNSIGNED_LONG,"
+                + " \"DATE\" INTEGER, \"value\" UNSIGNED_LONG)";
+        conn.createStatement().execute(ddl);
+
+        conn.createStatement().execute("UPSERT INTO " + nth_test_table
+                + " (id, page_id, \"DATE\", \"value\") VALUES (1, 8, 0, 300)");
+        conn.createStatement().execute(
+                "UPSERT INTO " + nth_test_table + " (id, page_id, \"DATE\", \"value\") VALUES (2, 8, 1, 7)");
+        conn.createStatement().execute(
+                "UPSERT INTO " + nth_test_table + " (id, page_id, \"DATE\", \"value\") VALUES (3, 9, 2, 9)");
+        conn.createStatement().execute(
+                "UPSERT INTO " + nth_test_table + " (id, page_id, \"DATE\", \"value\") VALUES (4, 9, 3, 4)");
+        conn.createStatement().execute(
+                "UPSERT INTO " + nth_test_table + " (id, page_id, \"DATE\", \"value\") VALUES (5, 10, 4, 2)");
+        conn.createStatement().execute("UPSERT INTO " + nth_test_table
+                + " (id, page_id, \"DATE\", \"value\") VALUES (6, 10, 5, 150)");
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery(
+                "SELECT NTH_VALUE(SUM_VALUE, 2) WITHIN GROUP (ORDER BY MIN_DATE ASC) FROM (" +
+                        "SELECT MIN(\"DATE\") AS MIN_DATE, SUM(\"value\") AS SUM_VALUE FROM "
+                        + nth_test_table + " GROUP BY page_id) x");
+
+        assertTrue(rs.next());
+        assertEquals(13, rs.getLong(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
     public void offsetValueLastMismatchByColumn() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2074d1f0/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
index 273b890..66b38c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
@@ -64,8 +64,6 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
         topValue = null;
         topValues.clear();
         topValuesCount = 0;
-        offset = -1;
-        useOffset = false;
     }
 
     @Override


[23/46] phoenix git commit: PHOENIX-3777 Add test for NTH_VALUE() function with multiple grouping sets (Loknath Priyatham Teja Singamsetty)

Posted by td...@apache.org.
PHOENIX-3777 Add test for NTH_VALUE() function with multiple grouping sets (Loknath Priyatham Teja Singamsetty)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8309b227
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8309b227
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8309b227

Branch: refs/heads/omid
Commit: 8309b227164eb53ffc60cb1d63635db7166d4b45
Parents: bcceaf8
Author: James Taylor <ja...@apache.org>
Authored: Mon Apr 17 10:27:14 2017 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Apr 17 10:27:14 2017 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/NthValueFunctionIT.java     | 65 ++++++++++++++++++++
 1 file changed, 65 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8309b227/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
index ff0f094..80da494 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NthValueFunctionIT.java
@@ -61,6 +61,71 @@ public class NthValueFunctionIT extends ParallelStatsDisabledIT {
     }
 
     @Test
+    public void multipleNthValueFunctionTest() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String nthValue = generateUniqueName();
+        String ddl = "CREATE TABLE IF NOT EXISTS " + nthValue + " "
+                + "(id INTEGER NOT NULL, feid UNSIGNED_LONG NOT NULL,"
+                + " uid INTEGER NOT NULL, lrd INTEGER"
+                + " CONSTRAINT PKVIEW PRIMARY KEY ( id, feid, uid))";
+        conn.createStatement().execute(ddl);
+
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 8, 1, 7)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 8, 2, 9)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 8, 3, 4)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 8, 4, 2)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 9, 5, 1)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 9, 6, 3)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 9, 8, 5)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 9, 7, 8)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 10, 5, 1)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 10, 6, 3)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 10, 7, 5)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (2, 10, 8, 8)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (3, 10, 5, 1)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (3, 10, 6, 3)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (3, 10, 7, 5)");
+        conn.createStatement().execute(
+            "UPSERT INTO " + nthValue + " (id, feid, uid, lrd) VALUES (3, 10, 8, 8)");
+
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery(
+            "SELECT NTH_VALUE(uid, 1) WITHIN GROUP (ORDER BY lrd DESC) as nth1_user_id, NTH_VALUE(uid, 2) WITHIN GROUP (ORDER BY lrd DESC) as nth2_user_id, NTH_VALUE(uid, 3) WITHIN GROUP (ORDER BY lrd DESC) as nth3_user_id  FROM " + nthValue
+                + " where id=2 and feid in (8, 9, 10) GROUP BY feid");
+
+        assertTrue(rs.next());
+        assertEquals(rs.getInt(1), 2);
+        assertEquals(rs.getInt(2), 1);
+        assertEquals(rs.getInt(3), 3);
+        assertTrue(rs.next());
+        assertEquals(rs.getInt(1), 7);
+        assertEquals(rs.getInt(2), 8);
+        assertEquals(rs.getInt(3), 6);
+        assertTrue(rs.next());
+        assertEquals(rs.getInt(1), 8);
+        assertEquals(rs.getInt(2), 7);
+        assertEquals(rs.getInt(3), 6);
+        assertFalse(rs.next());
+    }
+
+    @Test
     public void offsetValueAscOrder() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
 


[11/46] phoenix git commit: PHOENIX-2051 Link record is in the format CHILD-PARENT for phoenix views and it has to scan the entire table to find the parent suffix

Posted by td...@apache.org.
PHOENIX-2051 Link record is in the format CHILD-PARENT for phoenix views and it has to scan the entire table to find the parent suffix


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8093d10f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8093d10f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8093d10f

Branch: refs/heads/omid
Commit: 8093d10f1a481101d6c93fdf0744ff15ec48f4aa
Parents: bc814f1
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Fri Feb 17 15:50:24 2017 -0800
Committer: Thomas D'Silva <td...@apache.org>
Committed: Tue Mar 14 19:15:02 2017 -0700

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImpl.java       | 213 +++++++++----------
 .../phoenix/coprocessor/MetaDataProtocol.java   |   4 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 +-
 .../query/ConnectionQueryServicesImpl.java      |   4 +
 .../apache/phoenix/schema/MetaDataClient.java   |  24 ++-
 .../java/org/apache/phoenix/schema/PTable.java  |  10 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  17 ++
 .../org/apache/phoenix/util/UpgradeUtil.java    |  73 +++++++
 8 files changed, 228 insertions(+), 121 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index e21ef04..4ad3a8c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -55,6 +55,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES;
@@ -115,7 +116,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -972,36 +972,32 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 (!EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) || tableType == PTableType.VIEW) ? PTable.EncodedCQCounter.NULL_COUNTER
                         : new EncodedCQCounter();
         while (true) {
-            results.clear();
-            scanner.next(results);
-            if (results.isEmpty()) {
-                break;
-            }
-            Cell colKv = results.get(LINK_TYPE_INDEX);
-            if (colKv != null) {
-                int colKeyLength = colKv.getRowLength();
-                PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength-offset);
-                int colKeyOffset = offset + colName.getBytes().length + 1;
-                PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength-colKeyOffset);
-                if (isQualifierCounterKV(colKv)) {
-                    Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC);
-                    cqCounter.setValue(famName.getString(), value);
-                } else {
-                    if (colName.getString().isEmpty() && famName != null) {
-                        LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
-                        if (linkType == LinkType.INDEX_TABLE) {
-                            addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
-                        } else if (linkType == LinkType.PHYSICAL_TABLE) {
-                            physicalTables.add(famName);
-                        } else if (linkType == LinkType.PARENT_TABLE) {
-                            parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
-                            parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
-                        }
-                    } else {
-                        addColumnToTable(results, colName, famName, colKeyValues, columns, saltBucketNum != null);
-                    }
-                } 
-            }
+          results.clear();
+          scanner.next(results);
+          if (results.isEmpty()) {
+              break;
+          }
+          Cell colKv = results.get(LINK_TYPE_INDEX);
+          int colKeyLength = colKv.getRowLength();
+          PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength-offset);
+          int colKeyOffset = offset + colName.getBytes().length + 1;
+          PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength-colKeyOffset);
+          if (isQualifierCounterKV(colKv)) {
+              Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC);
+              cqCounter.setValue(famName.getString(), value);
+          } else if (Bytes.compareTo(LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length, colKv.getQualifierArray(), colKv.getQualifierOffset(), colKv.getQualifierLength())==0) {    
+              LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
+              if (linkType == LinkType.INDEX_TABLE) {
+                  addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
+              } else if (linkType == LinkType.PHYSICAL_TABLE) {
+                  physicalTables.add(famName);
+              } else if (linkType == LinkType.PARENT_TABLE) {
+                  parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
+                  parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
+              }
+          } else {
+              addColumnToTable(results, colName, famName, colKeyValues, columns, saltBucketNum != null);
+          }
         }
         // Avoid querying the stats table because we're holding the rowLock here. Issuing an RPC to a remote
         // server while holding this lock is a bad idea and likely to cause contention.
@@ -1648,41 +1644,37 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         return rowLock;
     }
 
-    private static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
+    private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
+
+    private void findAllChildViews(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName, TableViewFinderResult result) throws IOException {
+        TableViewFinderResult currResult = findChildViews(region, tenantId, schemaName, tableName);
+        result.addResult(currResult);
+        for (Result viewResult : currResult.getResults()) {
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] viewtenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] viewSchema = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewTable = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            findAllChildViews(region, viewtenantId, viewSchema, viewTable, result);
+        }
+    }
 
     /**
      * @param tableName parent table's name
      * Looks for whether child views exist for the table specified by table.
      * TODO: should we pass a timestamp here?
      */
-    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
-        byte[] schemaName = table.getSchemaName().getBytes();
-        byte[] tableName = table.getTableName().getBytes();
-        boolean isMultiTenant = table.isMultiTenant();
+    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName) throws IOException {
         Scan scan = new Scan();
-        // If the table is multi-tenant, we need to check across all tenant_ids,
-        // so we can't constrain the row key. Otherwise, any views would have
-        // the same tenantId.
-        if (!isMultiTenant) {
-            byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
-            byte[] stopRow = ByteUtil.nextKey(startRow);
-            scan.setStartRow(startRow);
-            scan.setStopRow(stopRow);
-        }
-        SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
-        SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES,
-                CompareOp.EQUAL, PTableType.VIEW.getSerializedValue().getBytes());
-        tableTypeFilter.setFilterIfMissing(false);
+        byte[] startRow = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+        byte[] stopRow = ByteUtil.nextKey(startRow);
+        scan.setStartRow(startRow);
+        scan.setStopRow(stopRow);
+        SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, CHILD_TABLE_BYTES);
         linkFilter.setFilterIfMissing(true);
-        byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil
-                .getPhysicalTableName(SchemaUtil.getTableNameAsBytes(schemaName, tableName), table.isNamespaceMapped())
-                .getName());
-        SuffixFilter rowFilter = new SuffixFilter(suffix);
-        FilterList filter = new FilterList(linkFilter,tableTypeFilter,rowFilter);
-        scan.setFilter(filter);
+        scan.setFilter(linkFilter);
         scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
-        scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
-        scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+        scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES);
         
         // Original region-only scanner modified due to PHOENIX-1208
         // RegionScanner scanner = region.getScanner(scan);
@@ -1709,7 +1701,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     }
                     results.add(result);
                 }
-                TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results, table);
+                TableViewFinderResult tableViewFinderResult = new TableViewFinderResult(results);
                 if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                     tableViewFinderResult.setAllViewsNotInSingleRegion();
                 }
@@ -1851,12 +1843,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         EnvironmentEdgeManager.currentTimeMillis(), null);
             }
 
-            // Only tables may have views, so prevent the running of this potentially
-            // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
             if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
                 // Handle any child views that exist
-                TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table,
-                        PHYSICAL_TABLE_BYTES);
+                TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes());
                 if (tableViewFinderResult.hasViews()) {
                     if (isCascade) {
                         if (tableViewFinderResult.allViewsInMultipleRegions()) {
@@ -1867,11 +1856,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         } else if (tableViewFinderResult.allViewsInSingleRegion()) {
                             // Recursively delete views - safe as all the views as all in the same region
                             for (Result viewResult : tableViewFinderResult.getResults()) {
-                                byte[][] rowKeyMetaData = new byte[3][];
-                                getVarChars(viewResult.getRow(), 3, rowKeyMetaData);
-                                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
-                                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
-                                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+                                byte[][] rowViewKeyMetaData = new byte[5][];
+                                getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+                                byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+                                byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+                                byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
                                 byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
                                 Delete delete = new Delete(viewKey, clientTimeStamp);
                                 rowsToDelete.add(delete);
@@ -1903,12 +1892,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 Cell kv = results.get(LINK_TYPE_INDEX);
                 int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
                 if (nColumns == 5
-                        && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0
-                        && rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX].length > 0
+                        && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0
                         && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
-                                LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0
-                        && LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]) == LinkType.INDEX_TABLE) {
-                    indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX]);
+                                LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) {
+                        LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
+                        if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && linkType == LinkType.INDEX_TABLE) {
+                            indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
+                        } else if (linkType == LinkType.PARENT_TABLE || linkType == LinkType.PHYSICAL_TABLE) {
+                            // delete parent->child link for views
+                            Cell parentTenantIdCell = MetaDataUtil.getCell(results, PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES);
+                            PName parentTenantId = parentTenantIdCell!=null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
+                            byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
+                            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
+                            rowsToDelete.add(linkDelete);
+                        }
                 }
                 // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
                 // FIXME: the version of the Delete constructor without the lock args was introduced
@@ -1920,7 +1917,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 scanner.next(results);
             } while (!results.isEmpty());
         }
-
+        
         // Recursively delete indexes
         for (byte[] indexName : indexNames) {
             byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);
@@ -2255,11 +2252,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             short deltaNumPkColsSoFar = 0;
             short columnsAddedToView = 0;
             short columnsAddedToBaseTable = 0;
-            byte[][] rowViewKeyMetaData = new byte[3][];
-            getVarChars(viewResult.getRow(), 3, rowViewKeyMetaData);
-            byte[] viewKey = SchemaUtil.getTableKey(rowViewKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX],
-                    rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX],
-                    rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] tenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] schema = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] table = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
             
             // lock the rows corresponding to views so that no other thread can modify the view meta-data
             RowLock viewRowLock = acquireLock(region, viewKey, locks);
@@ -2580,13 +2578,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
         for (Result viewResult : childViewsResult.getResults()) {
             short numColsDeleted = 0;
-            byte[][] rowViewKeyMetaData = new byte[3][];
-            getVarChars(viewResult.getRow(), 3, rowViewKeyMetaData);
-            byte[] viewKey =
-                    SchemaUtil.getTableKey(
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX],
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX],
-                        rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
+            byte[][] rowViewKeyMetaData = new byte[5][];
+            getVarChars(viewResult.getRow(), 5, rowViewKeyMetaData);
+            byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
+            byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewName = SchemaUtil.getTableNameFromFullName(rowViewKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]).getBytes();
+            byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
 
             // lock the rows corresponding to views so that no other thread can modify the view
             // meta-data
@@ -2882,17 +2879,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             schemaName, tableName);
                     // Size for worst case - all new columns are PK column
                     List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * ( 1 + table.getIndexes().size()));
-                    /*
-                     * If adding a column to a view, we don't want to propagate those meta-data changes to the child
-                     * view hierarchy. This is because our check of finding child views is expensive and we want making
-                     * meta-data changes to views to be light-weight. The side-effect of this change is that a child
-                     * won't have it's parent views columns i.e. it would have diverged itself from the parent view. See
-                     * https://issues.apache.org/jira/browse/PHOENIX-2051 for a proper way to fix the performance issue
-                     * and https://issues.apache.org/jira/browse/PHOENIX-2054 for enabling meta-data changes to a view
-                     * to be propagated to its view hierarchy.
-                     */
+                    // TODO propagate to grandchild views as well
                     if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
-                        TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+                        TableViewFinderResult childViewsResult = new TableViewFinderResult();
+                        findAllChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes(), childViewsResult);
                         if (childViewsResult.hasViews()) {
                             /* 
                              * Dis-allow if:
@@ -3166,13 +3156,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     List<Mutation> additionalTableMetaData = Lists.newArrayList();
                     
                     PTableType type = table.getType();
-                    // Only tables may have views, so prevent the running of this potentially
-                    // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
-                    // In the case of a view, we allow a column to be dropped without checking for
-                    // child views, but in the future we'll allow it and propagate it as necessary.
+                    // TODO propagate to grandchild views as well
                     if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
-                        TableViewFinderResult childViewsResult = 
-                                findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+                        TableViewFinderResult childViewsResult = new TableViewFinderResult();
+                        findAllChildViews(region, tenantId, table.getSchemaName().getBytes(), table.getTableName().getBytes(), childViewsResult);
                         if (childViewsResult.hasViews()) {
                             MetaDataMutationResult mutationResult =
                                     dropColumnsFromChildViews(region, table,
@@ -3560,21 +3547,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
         private List<Result> results = Lists.newArrayList();
         private boolean allViewsNotInSingleRegion = false;
-        private PTable table;
 
-        private TableViewFinderResult(List<Result> results, PTable table) {
+        private TableViewFinderResult() {
+        }
+        
+        private TableViewFinderResult(List<Result> results) {
             this.results = results;
-            this.table = table;
         }
-
+        
         public boolean hasViews() {
-            int localIndexesCount = 0;
-            for(PTable index : table.getIndexes()) {
-                if(index.getIndexType().equals(IndexType.LOCAL)) {
-                    localIndexesCount++;
-                }
-            }
-            return results.size()-localIndexesCount > 0;
+            return !results.isEmpty();
         }
 
         private void setAllViewsNotInSingleRegion() {
@@ -3598,6 +3580,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         private boolean allViewsInMultipleRegions() {
             return results.size() > 0 && allViewsNotInSingleRegion;
         }
+        
+        private void addResult(TableViewFinderResult result) {
+            this.results.addAll(result.getResults());
+            if (result.allViewsInMultipleRegions()) {
+                this.setAllViewsNotInSingleRegion();
+            }
+        }
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index f2abbdb..93bba74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -86,8 +86,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_1 = MIN_TABLE_TIMESTAMP + 18;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0 = MIN_TABLE_TIMESTAMP + 20;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 = MIN_TABLE_TIMESTAMP + 25;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 26;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
     
     // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). 
     // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
@@ -103,6 +104,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, "4.9.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, "4.10.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x");
     }
     
     public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 0e62164..e3a206c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -78,7 +78,6 @@ import com.google.common.collect.Lists;
  * 
  */
 public class PhoenixDatabaseMetaData implements DatabaseMetaData {
-    public static final int INDEX_NAME_INDEX = 4; // Shared with FAMILY_NAME_INDEX
     public static final int FAMILY_NAME_INDEX = 4;
     public static final int COLUMN_NAME_INDEX = 3;
     public static final int TABLE_NAME_INDEX = 2;
@@ -124,6 +123,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final byte[] PHYSICAL_NAME_BYTES = Bytes.toBytes(PHYSICAL_NAME);
 
     public static final String COLUMN_FAMILY = "COLUMN_FAMILY";
+    public static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY);
     public static final String TABLE_CAT = "TABLE_CAT";
     public static final String TABLE_CATALOG = "TABLE_CATALOG";
     public static final String TABLE_SCHEM = "TABLE_SCHEM";
@@ -514,6 +514,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
         if (colPattern == null || colPattern.length() == 0) {
             appendConjunction(where);
             where.append(COLUMN_NAME + " is not null" );
+            appendConjunction(where);
+            where.append(LINK_TYPE + " is null" );
         }
         boolean isTenantSpecificConnection = connection.getTenantId() != null;
         if (isTenantSpecificConnection) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 59252ad..ee9f3d0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -58,6 +58,7 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_11_0;
 import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
@@ -2768,6 +2769,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0);
                     clearCache();
                 }
+                if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) {
+                    upgradeTo4_11_0(metaConnection);
+                }
             }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d09284f..50ff64b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -313,7 +313,16 @@ public class MetaDataClient {
             COLUMN_QUALIFIER_COUNTER + 
             ") VALUES (?, ?, ?, ?, ?)";
 
-    public static final String INCREMENT_SEQ_NUM =
+    private static final String CREATE_CHILD_LINK =
+            "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
+                    TENANT_ID + "," +
+                    TABLE_SCHEM + "," +
+                    TABLE_NAME + "," +
+                    COLUMN_NAME + "," +
+                    COLUMN_FAMILY + "," +
+                    LINK_TYPE + 
+                    ") VALUES (?, ?, ?, ?, ?, ?)";
+    private static final String INCREMENT_SEQ_NUM =
             "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
                     TENANT_ID + "," +
                     TABLE_SCHEM + "," +
@@ -2063,9 +2072,7 @@ public class MetaDataClient {
                     }
                     pkColumns = newLinkedHashSet(parent.getPKColumns());
 
-                    // Add row linking from view to its parent table
-                    // FIXME: not currently used, but see PHOENIX-1367
-                    // as fixing that will require it's usage.
+                    // Add row linking view to its parent 
                     PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK);
                     linkStatement.setString(1, tenantIdStr);
                     linkStatement.setString(2, schemaName);
@@ -2074,6 +2081,15 @@ public class MetaDataClient {
                     linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
                     linkStatement.setString(6, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                     linkStatement.execute();
+                    // Add row linking parent to view
+                    linkStatement = connection.prepareStatement(CREATE_CHILD_LINK);
+                    linkStatement.setString(1, parent.getTenantId() == null ? null : parent.getTenantId().getString());
+                    linkStatement.setString(2, parent.getSchemaName() == null ? null : parent.getSchemaName().getString());
+                    linkStatement.setString(3, parent.getTableName().getString());
+                    linkStatement.setString(4, tenantIdStr);
+                    linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName));
+                    linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue());
+                    linkStatement.execute();
                 }
             } else {
                 columns = new LinkedHashMap<PColumn,PColumn>(colDefs.size());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 91a41a3..24a494f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -141,14 +141,18 @@ public interface PTable extends PMetaDataEntity {
          */
         INDEX_TABLE((byte)1),
         /**
-         * Link from a view to its physical table
+         * Link from a view or index to its physical table
          */
         PHYSICAL_TABLE((byte)2),
         /**
          * Link from a view to its parent table
          */
-        PARENT_TABLE((byte)3);
-        
+        PARENT_TABLE((byte)3),
+        /**
+         * Link from a parent table to its child view
+         */
+        CHILD_TABLE((byte)4);
+
         private final byte[] byteValue;
         private final byte serializedValue;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 2e25b13..b52cb79 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -306,6 +306,23 @@ public class MetaDataUtil {
         return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName, QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, indexName);
     }
     
+    public static byte[] getChildLinkKey(PName parentTenantId, PName parentSchemaName, PName parentTableName, PName viewTenantId, PName viewName) {
+        return ByteUtil.concat(parentTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        parentSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        parentTableName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY,
+                        viewTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : viewTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                        viewName.getBytes());
+    }
+    
+    public static Cell getCell(List<Cell> cells, byte[] cq) {
+        for (Cell cell : cells) {
+            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cq, 0, cq.length) == 0) {
+                return cell;
+            }
+        }
+        return null;
+    }
+    
     public static boolean isMultiTenant(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) {
         if (getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, builder, ptr)) {
             return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8093d10f/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 62fefc7..02e4ec2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -903,6 +903,7 @@ public class UpgradeUtil {
                         "TABLE_SCHEM %s " +
                         "AND TABLE_NAME = ? " +
                         "AND COLUMN_NAME IS NOT NULL " +
+                        "AND LINK_TYPE IS NULL " +
                         "ORDER BY " + 
                         ORDINAL_POSITION;
 
@@ -1073,6 +1074,78 @@ public class UpgradeUtil {
         }
     }
     
+    /**
+     * Upgrade the metadata in the catalog table to enable adding columns to tables with views
+     * @param oldMetaConnection caller should take care of closing the passed connection appropriately
+     * @throws SQLException
+     */
+    public static void upgradeTo4_11_0(PhoenixConnection oldMetaConnection) throws SQLException {
+        PhoenixConnection metaConnection = null;
+        try {
+            // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG 
+            metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+            logger.info("Upgrading metadata to add parent to child links for views");
+            metaConnection.commit();
+            //     physical table 
+            //         |  
+            //     child view    
+            //         |
+            //     grand child view
+            // Create parent table to child view CHILD link. As the PARENT link from child view to physical table is not there (it gets overwritten with the PHYSICAL link) use the PHYSICAL link instead.
+            // We need to filter out grand child views PHYSICAL links while running this query
+            String createChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" +
+                                        "SELECT PARENT_TENANT_ID," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" +
+                                        "              WHEN 0 THEN NULL" + 
+                                        "              ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + 
+                                        "       END AS PARENT_SCHEMA," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" + 
+                                        "              WHEN 0 THEN COLUMN_FAMILY" + 
+                                        "              ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + 
+                                        "       END AS PARENT_TABLE," + 
+                                        "       TENANT_ID," + 
+                                        "       CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + 
+                                        "            ELSE TABLE_SCHEM||'.'||TABLE_NAME" + 
+                                        "       END AS VIEW_NAME," + 
+                                        "       4 AS LINK_TYPE " + 
+                                        "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + 
+                                        "WHERE LINK_TYPE = 2 " +
+                                        "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) NOT IN (   " +
+                                        "       SELECT TENANT_ID, " +
+                                        "              TABLE_SCHEM, " +
+                                        "              TABLE_NAME " +
+                                        "       FROM   SYSTEM.CATALOG " +
+                                        "       WHERE  LINK_TYPE = 3 )";
+            metaConnection.createStatement().execute(createChildLink);
+            metaConnection.commit();
+            // Create child view to grand child view CHILD link using grand child view to child view PARENT link.
+            String createGrandChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" +
+                                        "SELECT PARENT_TENANT_ID," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" +
+                                        "              WHEN 0 THEN NULL" + 
+                                        "              ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + 
+                                        "       END AS PARENT_SCHEMA," + 
+                                        "       CASE INSTR(COLUMN_FAMILY,'.')" + 
+                                        "              WHEN 0 THEN COLUMN_FAMILY" + 
+                                        "              ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + 
+                                        "       END AS PARENT_TABLE," + 
+                                        "       TENANT_ID," + 
+                                        "       CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + 
+                                        "            ELSE TABLE_SCHEM||'.'||TABLE_NAME" + 
+                                        "       END AS VIEW_NAME," + 
+                                        "       4 AS LINK_TYPE " + 
+                                        "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + 
+                                        "WHERE LINK_TYPE = 3 ";
+            metaConnection.createStatement().execute(createGrandChildLink);
+            metaConnection.commit();
+            metaConnection.getQueryServices().clearCache();
+        } finally {
+            if (metaConnection != null) {
+                metaConnection.close();
+            }
+        }
+    }
+    
     private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
             String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
             throws SQLException {


[10/46] phoenix git commit: Set version to 4.11.0-HBase-1.2-SNAPSHOT after release

Posted by td...@apache.org.
Set version to 4.11.0-HBase-1.2-SNAPSHOT after release


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bc814f15
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bc814f15
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bc814f15

Branch: refs/heads/omid
Commit: bc814f1566189ba10258547f3889af177e1abf04
Parents: e34431c
Author: Mujtaba <mu...@apache.org>
Authored: Tue Mar 14 15:37:45 2017 -0700
Committer: Mujtaba <mu...@apache.org>
Committed: Tue Mar 14 15:37:45 2017 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml           | 2 +-
 phoenix-client/pom.xml             | 2 +-
 phoenix-core/pom.xml               | 2 +-
 phoenix-flume/pom.xml              | 2 +-
 phoenix-hive/pom.xml               | 2 +-
 phoenix-kafka/pom.xml              | 2 +-
 phoenix-pherf/pom.xml              | 2 +-
 phoenix-pig/pom.xml                | 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml        | 2 +-
 phoenix-server/pom.xml             | 2 +-
 phoenix-spark/pom.xml              | 2 +-
 phoenix-tracing-webapp/pom.xml     | 2 +-
 pom.xml                            | 2 +-
 14 files changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 232e7a6..a6f6f64 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 10ccfb0..fb0520c 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 1cd9117..9d6e0f4 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index d5b7bb7..809f7fd 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 628dc21..72b3c39 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 438693f..5559e5e 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.10.0-HBase-1.2</version>
+		<version>4.11.0-HBase-1.2-SNAPSHOT</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 60666c0..ecf8251 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.10.0-HBase-1.2</version>
+		<version>4.11.0-HBase-1.2-SNAPSHOT</version>
 	</parent>
 
 	<artifactId>phoenix-pherf</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index bce33a7..e191397 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-queryserver-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 7db8ee4..432f854 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index dd38522..dc2a1ab 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 86bb7f8..6204e99 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 690a286..612300f 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -28,7 +28,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.10.0-HBase-1.2</version>
+    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-spark</artifactId>
   <name>Phoenix - Spark</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/phoenix-tracing-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml
index 41c0582..35f70ad 100755
--- a/phoenix-tracing-webapp/pom.xml
+++ b/phoenix-tracing-webapp/pom.xml
@@ -27,7 +27,7 @@
     <parent>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix</artifactId>
-      <version>4.10.0-HBase-1.2</version>
+      <version>4.11.0-HBase-1.2-SNAPSHOT</version>
     </parent>
 
     <artifactId>phoenix-tracing-webapp</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc814f15/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index a806be2..1b74aa1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.phoenix</groupId>
   <artifactId>phoenix</artifactId>
-  <version>4.10.0-HBase-1.2</version>
+  <version>4.11.0-HBase-1.2-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Phoenix</name>
   <description>A SQL layer over HBase</description>


[18/46] phoenix git commit: PHOENIX-3765 NPE in IndexMaintainer when using old client and 4.10 server

Posted by td...@apache.org.
PHOENIX-3765 NPE in IndexMaintainer when using old client and 4.10 server


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd444d9a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd444d9a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd444d9a

Branch: refs/heads/omid
Commit: cd444d9a6a8e560889826bc491db7d71ad1960e5
Parents: 92e728e
Author: Samarth <sa...@salesforce.com>
Authored: Thu Apr 6 12:34:36 2017 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Apr 6 12:34:36 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/index/IndexMaintainer.java   | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd444d9a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 2224e38..26c2421 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -1308,6 +1308,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
         this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
         this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
+        // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables.
+        this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
+        this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
         initCachedState();
     }
     


[45/46] phoenix git commit: clean + fix merge bugs

Posted by td...@apache.org.
clean + fix merge bugs


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ffac47ee
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ffac47ee
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ffac47ee

Branch: refs/heads/omid
Commit: ffac47ee60c3af88b220e09dce230e718a68c42d
Parents: c840cc9
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue May 9 12:12:09 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue May 9 12:12:09 2017 +0300

----------------------------------------------------------------------
 .../org/apache/phoenix/tx/TxCheckpointIT.java   | 39 ++++++++++++++------
 .../apache/phoenix/execute/MutationState.java   | 23 +-----------
 .../transaction/OmidTransactionContext.java     |  6 ---
 .../transaction/PhoenixTransactionContext.java  |  8 ----
 .../transaction/TephraTransactionContext.java   |  1 -
 5 files changed, 28 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ffac47ee/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
index aac9586..989a97e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
@@ -36,6 +36,7 @@ import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -48,12 +49,25 @@ import org.junit.runners.Parameterized.Parameters;
 public class TxCheckpointIT extends ParallelStatsDisabledIT {
 	
 	private final boolean localIndex;
-	private final boolean mutable;
-
-	public TxCheckpointIT(boolean localIndex, boolean mutable) {
-		this.localIndex = localIndex;
-		this.mutable = mutable;
+	private final String tableDDLOptions;
 
+	public TxCheckpointIT(boolean localIndex, boolean mutable, boolean columnEncoded) {
+	    StringBuilder optionBuilder = new StringBuilder();
+	    this.localIndex = localIndex;
+	    if (!columnEncoded) {
+	        if (optionBuilder.length()!=0)
+	            optionBuilder.append(",");
+	        optionBuilder.append("COLUMN_ENCODED_BYTES=0");
+	    }
+	    if (!mutable) {
+	        if (optionBuilder.length()!=0)
+	            optionBuilder.append(",");
+	        optionBuilder.append("IMMUTABLE_ROWS=true");
+	        if (!columnEncoded) {
+	            optionBuilder.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+	        }
+	    }
+	    this.tableDDLOptions = optionBuilder.toString();
 	}
 	
     private static Connection getConnection() throws SQLException {
@@ -66,10 +80,11 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
         return conn;
     }
 	
-	@Parameters(name="TxCheckpointIT_localIndex={0},mutable={1}") // name is used by failsafe as file name in reports
+	@Parameters(name="TxCheckpointIT_localIndex={0},mutable={1},columnEncoded={2}") // name is used by failsafe as file name in reports
     public static Collection<Boolean[]> data() {
         return Arrays.asList(new Boolean[][] {     
-                 { false, false }, { false, true }, { true, false }, { true, true }  
+                { false, false, false }, { false, false, true }, { false, true, false }, { false, true, true },
+                { true, false, false }, { true, false, true }, { true, true, false }, { true, true, true }
            });
     }
     
@@ -86,7 +101,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
         Connection conn = getConnection(props);
         conn.setAutoCommit(true);
         conn.createStatement().execute("CREATE SEQUENCE "+seqName);
-        conn.createStatement().execute("CREATE TABLE " + fullTableName + "(pk INTEGER PRIMARY KEY, val INTEGER)"+(!mutable? " IMMUTABLE_ROWS=true" : ""));
+        conn.createStatement().execute("CREATE TABLE " + fullTableName + "(pk INTEGER PRIMARY KEY, val INTEGER)"+tableDDLOptions);
         conn.createStatement().execute("CREATE "+(localIndex? "LOCAL " : "")+"INDEX " + indexName + " ON " + fullTableName + "(val)");
 
         conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES (NEXT VALUE FOR " + seqName + ",1)");
@@ -122,7 +137,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
         conn.setAutoCommit(false);
         try {
             Statement stmt = conn.createStatement();
-            stmt.execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"+(!mutable? " IMMUTABLE_ROWS=true" : ""));
+            stmt.execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"+tableDDLOptions);
             stmt.execute(indexDDL);
             
             stmt.executeUpdate("upsert into " + fullTableName + " values('x1', 'y1', 'a1')");
@@ -212,7 +227,7 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 			Statement stmt = conn.createStatement();
 
 			stmt.execute("CREATE TABLE " + fullTableName + "(ID BIGINT NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"
-					+ (!mutable ? " IMMUTABLE_ROWS=true" : ""));
+					+ tableDDLOptions);
 			stmt.execute("CREATE " + (localIndex ? "LOCAL " : "")
 					+ "INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE(v2)");
 
@@ -302,9 +317,9 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
 			conn.setAutoCommit(false);
 			Statement stmt = conn.createStatement();
 			stmt.execute("CREATE TABLE " + fullTableName + "1(ID1 BIGINT NOT NULL PRIMARY KEY, FK1A INTEGER, FK1B INTEGER)"
-					+ (!mutable ? " IMMUTABLE_ROWS=true" : ""));
+					+ tableDDLOptions);
 			stmt.execute("CREATE TABLE " + fullTableName + "2(ID2 BIGINT NOT NULL PRIMARY KEY, FK2 INTEGER)"
-					+ (!mutable ? " IMMUTABLE_ROWS=true" : ""));
+					+ tableDDLOptions);
 			stmt.execute("CREATE " + (localIndex ? "LOCAL " : "")
 					+ "INDEX " + indexName + " ON " + fullTableName + "1 (FK1B)");
 			

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ffac47ee/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index e8d963e..c4d01b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -203,7 +203,7 @@ public class MutationState implements SQLCloseable {
      * when a data table transaction is started before the create index
      * but completes after it. In this case, we need to rerun the data
      * table transaction after the index creation so that the index rows
-     * are generated. See {@link #addDMLFence(PTable)} and TEPHRA-157
+     * are generated. See TEPHRA-157
      * for more information.
      * @param dataTable the data table upon which an index is being added
      * @throws SQLException
@@ -222,22 +222,6 @@ public class MutationState implements SQLCloseable {
         }
     }
     
-    /**
-     * Add an entry to the change set representing the DML operation that is starting.
-     * These entries will not conflict with each other, but they will conflict with a
-     * DDL operation of creating an index. See {@link #addDMLFence(PTable)} and TEPHRA-157
-     * for more information.
-     * @param table the table which is doing DML
-     * @throws SQLException
-     */
-    private void addDMLFence(PTable table) throws SQLException {
-        if (table.getType() == PTableType.INDEX || !table.isTransactional()) {
-            return;
-        }
-
-        phoenixTransactionContext.markDMLFence(table);
-    }
-    
     public boolean checkpointIfNeccessary(MutationPlan plan) throws SQLException {
         if (! phoenixTransactionContext.isTransactionRunning()  || plan.getTargetRef() == null || plan.getTargetRef().getTable() == null || !plan.getTargetRef().getTable().isTransactional()) {
             return false;
@@ -970,7 +954,6 @@ public class MutationState implements SQLCloseable {
                         if (table.isTransactional()) {
                             // Track tables to which we've sent uncommitted data
                             txTableRefs.add(origTableRef);
-//                            addDMLFence(table);
                             uncommittedPhysicalNames.add(table.getPhysicalName().getString());
 
                             // If we have indexes, wrap the HTable in a delegate HTable that
@@ -1231,10 +1214,6 @@ public class MutationState implements SQLCloseable {
                             startTransaction();
                             // Add back read fences
                             Set<TableRef> txTableRefs = txMutations.keySet();
-//                            for (TableRef tableRef : txTableRefs) {
-//                                PTable dataTable = tableRef.getTable();
-//                                addDMLFence(dataTable);
-//                            }
                             try {
                                 // Only retry if an index was added
                                 retryCommit = shouldResubmitTransaction(txTableRefs);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ffac47ee/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 25ec0cf..2ae95bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -45,12 +45,6 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void markDMLFence(PTable table) {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
     public void join(PhoenixTransactionContext ctx) {
         // TODO Auto-generated method stub
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ffac47ee/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 5b1a837..a13b5a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -87,14 +87,6 @@ public interface PhoenixTransactionContext {
             throws SQLException;
 
     /**
-     * mark DML with table information for conflict detection of concurrent
-     * DDL operation, as explained in [PHOENIX-2478], [TEPHRA-157] and [OMID-56].
-     *
-     * @param table  the table that the DML command works on
-     */
-    public void markDMLFence(PTable table);
-
-    /**
      * Augment the current context with ctx modified keys
      *
      * @param ctx

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ffac47ee/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 447ce0e..5c4178d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -261,7 +261,6 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         }
     }
 
-    @Override
     public void markDMLFence(PTable table) {
         byte[] logicalKey = table.getName().getBytes();
         TransactionAware logicalTxAware = VisibilityFence.create(logicalKey);


[03/46] phoenix git commit: Remove redundant spaces

Posted by td...@apache.org.
Remove redundant spaces


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/acfc9d52
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/acfc9d52
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/acfc9d52

Branch: refs/heads/omid
Commit: acfc9d52875dcb3e1653d22e58f78ad150494355
Parents: 1d32eb2
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Thu Feb 9 16:33:20 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Thu Feb 9 16:33:20 2017 +0200

----------------------------------------------------------------------
 .../transaction/OmidTransactionTable.java       |  2 +-
 .../transaction/PhoenixTransactionContext.java  | 40 ++++++++++----------
 .../transaction/PhoenixTransactionalTable.java  | 34 ++++++++---------
 3 files changed, 38 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/acfc9d52/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 5a5291c..f15fdd3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -18,7 +18,7 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     public OmidTransactionTable(PhoenixTransactionContext ctx) {
         // TODO Auto-generated constructor stub
     }
-    
+
     @Override
     public Result get(Get get) throws IOException {
         // TODO Auto-generated method stub

http://git-wip-us.apache.org/repos/asf/phoenix/blob/acfc9d52/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 3ac0ae3..f07640e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -9,43 +9,43 @@ public interface PhoenixTransactionContext {
 
     /**
      * Starts a transaction
-     * 
+     *
      * @throws SQLException
      */
     public void begin() throws SQLException;
-    
+
     /**
      * Commits a transaction
-     * 
+     *
      * @throws SQLException
      */
     public void commit() throws SQLException;
-    
+
     /**
      * Rollback a transaction
-     * 
+     *
      * @throws SQLException
      */
     public void abort() throws SQLException;
 
     /**
      * Rollback a transaction
-     * 
-     * @param e  
+     *
+     * @param e
      * @throws SQLException
      */
     public void abort(SQLException e) throws SQLException;
-    
+
     /**
      * Create a checkpoint in a transaction as defined in [TEPHRA-96]
      * @throws SQLException
      */
     public void checkpoint() throws SQLException;
-    
+
     /**
-     * Commit DDL to guarantee that no transaction started before create index 
+     * Commit DDL to guarantee that no transaction started before create index
      * and committed afterwards, as explained in [PHOENIX-2478], [TEPHRA-157] and [OMID-56].
-     * 
+     *
      * @param dataTable  the table that the DDL command works on
      * @throws SQLException
      * @throws InterruptedException
@@ -53,22 +53,22 @@ public interface PhoenixTransactionContext {
      */
     public void commitDDLFence(PTable dataTable)
             throws SQLException, InterruptedException, TimeoutException;
-    
+
     /**
-     * mark DML with table information for conflict detection of concurrent 
+     * mark DML with table information for conflict detection of concurrent
      * DDL operation, as explained in [PHOENIX-2478], [TEPHRA-157] and [OMID-56].
-     * 
+     *
      * @param table  the table that the DML command works on
      */
     public void markDMLFence(PTable table);
-    
+
     /**
      * Augment the current context with ctx modified keys
-     * 
+     *
      * @param ctx
      */
     public void join(PhoenixTransactionContext ctx);
-    
+
     /**
      * Is there a transaction in flight?
      */
@@ -78,12 +78,12 @@ public interface PhoenixTransactionContext {
      * Reset transaction state
      */
     public void reset();
-    
-    /** 
+
+    /**
      * Returns transaction unique identifier
      */
     long getTransactionId();
-    
+
     /**
      * Returns transaction snapshot id
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/acfc9d52/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index 2316dc4..ff2632c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -19,7 +19,7 @@ public interface PhoenixTransactionalTable {
     /**
      * Transaction version of {@link HTableInterface#get(Get get)}
      * @param get
-     * @return 
+     * @return
      * @throws IOException
      */
     public Result get(Get get) throws IOException;
@@ -34,25 +34,25 @@ public interface PhoenixTransactionalTable {
     /**
      * Transactional version of {@link HTableInterface#delete(Delete delete)}
      *
-     * @param delete 
-     * @throws IOException 
+     * @param delete
+     * @throws IOException
      */
     public void delete(Delete delete) throws IOException;
-    
+
     /**
      * Transactional version of {@link HTableInterface#getScanner(Scan scan)}
      *
-     * @param scan 
+     * @param scan
      * @return ResultScanner
      * @throws IOException
      */
     public ResultScanner getScanner(Scan scan) throws IOException;
-    
+
     /**
      * Returns Htable name
      */
     public byte[] getTableName();
-    
+
     /**
      * Returns Htable configuration object
      */
@@ -60,7 +60,7 @@ public interface PhoenixTransactionalTable {
 
     /**
      * Returns HTableDescriptor of Htable
-     * @throws IOException 
+     * @throws IOException
      */
     public HTableDescriptor getTableDescriptor() throws IOException;
 
@@ -69,28 +69,28 @@ public interface PhoenixTransactionalTable {
      * @throws IOException
      */
     public boolean exists(Get get) throws IOException;
-    
+
     /**
      * Transactional version of {@link HTableInterface#get(List gets)}
-     * @throws IOException 
+     * @throws IOException
      */
     public Result[] get(List<Get> gets) throws IOException;
-    
+
     /**
      * Transactional version of {@link HTableInterface#getScanner(byte[] family)}
-     * @throws IOException 
+     * @throws IOException
      */
     public ResultScanner getScanner(byte[] family) throws IOException;
 
     /**
      * Transactional version of {@link HTableInterface#getScanner(byte[] family, byte[] qualifier)}
-     * @throws IOException 
+     * @throws IOException
      */
     public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
 
     /**
      * Transactional version of {@link HTableInterface#put(List puts)}
-     * @throws IOException 
+     * @throws IOException
      */
     public void put(List<Put> puts) throws IOException;
 
@@ -99,7 +99,7 @@ public interface PhoenixTransactionalTable {
      * @throws IOException
      */
     public void delete(List<Delete> deletes) throws IOException;
-    
+
     /**
      * Return the underling htable
      */
@@ -129,10 +129,10 @@ public interface PhoenixTransactionalTable {
      * Delegates to see HTable.flushCommits()
      */
     public void flushCommits() throws IOException;
-    
+
     /**
      * Releases resources
-     * @throws IOException 
+     * @throws IOException
      */
     public void close() throws IOException;
 }


[24/46] phoenix git commit: PHOENIX-3789 Addendum to execute cross region index maintenance calls in postBatchMutateIndispensably

Posted by td...@apache.org.
PHOENIX-3789 Addendum to execute cross region index maintenance calls in postBatchMutateIndispensably


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ee886bab
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ee886bab
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ee886bab

Branch: refs/heads/omid
Commit: ee886bab91f58f4b54efc619d2ec3aec92d40a92
Parents: 8309b22
Author: James Taylor <ja...@apache.org>
Authored: Mon Apr 17 14:53:41 2017 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Apr 17 19:12:02 2017 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/hbase/index/Indexer.java | 39 ++++++++++++++++----
 1 file changed, 31 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ee886bab/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index f485bdf..de98051 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -372,7 +372,7 @@ public class Indexer extends BaseRegionObserver {
       super.postPut(e, put, edit, durability);
           return;
         }
-    doPost(edit, put, durability, true);
+    doPost(edit, put, durability, true, false);
   }
 
   @Override
@@ -382,10 +382,29 @@ public class Indexer extends BaseRegionObserver {
       super.postDelete(e, delete, edit, durability);
           return;
         }
-    doPost(edit, delete, durability, true);
+    doPost(edit, delete, durability, true, false);
   }
 
   @Override
+  public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
+      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+      if (this.disabled) {
+        super.postBatchMutate(c, miniBatchOp);
+        return;
+      }
+      WALEdit edit = miniBatchOp.getWalEdit(0);
+      if (edit != null) {
+        IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
+        if (ikv != null) {
+          // This will prevent the postPut and postDelete hooks from doing anything
+          // We need to do this now, as the postBatchMutateIndispensably (where the
+          // actual index writing gets done) is called after the postPut and postDelete.
+          ikv.markBatchFinished();
+        }
+      }
+  }
+  
+  @Override
   public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
       MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success) throws IOException {
       if (this.disabled) {
@@ -398,13 +417,17 @@ public class Indexer extends BaseRegionObserver {
         //each batch operation, only the first one will have anything useful, so we can just grab that
         Mutation mutation = miniBatchOp.getOperation(0);
         WALEdit edit = miniBatchOp.getWalEdit(0);
-        doPost(edit, mutation, mutation.getDurability(), false);
+        // We're forcing the index writes here because we've marked the index batch as "finished"
+        // to prevent postPut and postDelete from doing anything, but hold off on writing them
+        // until now so we're outside of the MVCC lock (see PHOENIX-3789). Without this hacky
+        // forceWrite flag, we'd ignore them again here too.
+        doPost(edit, mutation, mutation.getDurability(), false, true);
     }
   }
 
-  private void doPost(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates) throws IOException {
+  private void doPost(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates, boolean forceWrite) throws IOException {
     try {
-      doPostWithExceptions(edit, m, durability, allowLocalUpdates);
+      doPostWithExceptions(edit, m, durability, allowLocalUpdates, forceWrite);
       return;
     } catch (Throwable e) {
       rethrowIndexingException(e);
@@ -413,7 +436,7 @@ public class Indexer extends BaseRegionObserver {
         "Somehow didn't complete the index update, but didn't return succesfully either!");
   }
 
-  private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates)
+  private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates, boolean forceWrite)
           throws Exception {
       //short circuit, if we don't need to do any work
       if (durability == Durability.SKIP_WAL || !this.builder.isEnabled(m) || edit == null) {
@@ -447,14 +470,14 @@ public class Indexer extends BaseRegionObserver {
            * once (this hook gets called with the same WALEdit for each Put/Delete in a batch, which can
            * lead to writing all the index updates for each Put/Delete).
            */
-          if (!ikv.getBatchFinished() || allowLocalUpdates) {
+          if ((!ikv.getBatchFinished() || forceWrite) || allowLocalUpdates) {
               Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
 
               // the WAL edit is kept in memory and we already specified the factory when we created the
               // references originally - therefore, we just pass in a null factory here and use the ones
               // already specified on each reference
               try {
-            	  if (!ikv.getBatchFinished()) {
+            	  if (!ikv.getBatchFinished() || forceWrite) {
             		  current.addTimelineAnnotation("Actually doing index update for first time");
             		  writer.writeAndKillYourselfOnFailure(indexUpdates, allowLocalUpdates);
             	  } else if (allowLocalUpdates) {


[36/46] phoenix git commit: Remove tephra dependency from BaseTest

Posted by td...@apache.org.
Remove tephra dependency from BaseTest


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b3a21368
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b3a21368
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b3a21368

Branch: refs/heads/omid
Commit: b3a213685ef97a41c1a369f035949b22b03d6083
Parents: f090dd2
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon May 8 12:27:11 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon May 8 12:27:11 2017 +0300

----------------------------------------------------------------------
 .../apache/phoenix/execute/MutationState.java   | 14 ++--
 .../transaction/OmidTransactionContext.java     | 19 ++++++
 .../transaction/PhoenixTransactionContext.java  | 16 +++++
 .../transaction/TephraTransactionContext.java   | 64 ++++++++++++++++++
 .../transaction/TephraTransactionTable.java     | 12 +++-
 .../apache/phoenix/util/TransactionUtil.java    |  4 +-
 .../java/org/apache/phoenix/query/BaseTest.java | 68 ++------------------
 7 files changed, 124 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 2b72be1..e8d963e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -297,7 +297,7 @@ public class MutationState implements SQLCloseable {
     public HTableInterface getHTable(PTable table) throws SQLException {
         HTableInterface htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
         if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) {
-            PhoenixTransactionalTable phoenixTransactionTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, htable, table.isImmutableRows());
+            PhoenixTransactionalTable phoenixTransactionTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, htable, table);
             // Using cloned mutationState as we may have started a new transaction already
             // if auto commit is true and we need to use the original one here.
             htable = phoenixTransactionTable;
@@ -970,7 +970,7 @@ public class MutationState implements SQLCloseable {
                         if (table.isTransactional()) {
                             // Track tables to which we've sent uncommitted data
                             txTableRefs.add(origTableRef);
-                            addDMLFence(table);
+//                            addDMLFence(table);
                             uncommittedPhysicalNames.add(table.getPhysicalName().getString());
 
                             // If we have indexes, wrap the HTable in a delegate HTable that
@@ -980,7 +980,7 @@ public class MutationState implements SQLCloseable {
                                 hTable = new MetaDataAwareHTable(hTable, origTableRef);
                             }
 
-                            hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table.isImmutableRows());                          
+                            hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
                         }
                         
                         long numMutations = mutationList.size();
@@ -1231,10 +1231,10 @@ public class MutationState implements SQLCloseable {
                             startTransaction();
                             // Add back read fences
                             Set<TableRef> txTableRefs = txMutations.keySet();
-                            for (TableRef tableRef : txTableRefs) {
-                                PTable dataTable = tableRef.getTable();
-                                addDMLFence(dataTable);
-                            }
+//                            for (TableRef tableRef : txTableRefs) {
+//                                PTable dataTable = tableRef.getTable();
+//                                addDMLFence(dataTable);
+//                            }
                             try {
                                 // Only retry if an index was added
                                 retryCommit = shouldResubmitTransaction(txTableRefs);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index cec07d3..25ec0cf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -1,5 +1,6 @@
 package org.apache.phoenix.transaction;
 
+import java.io.IOException;
 import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
 
@@ -141,4 +142,22 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
         // TODO Auto-generated method stub
         return null;
     }
+
+    @Override
+    public void setTxnConfigs(Configuration config, String tmpFolder, int defaultTxnTimeoutSeconds) throws IOException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void setupTxManager(Configuration config, String url) throws SQLException {
+        // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void tearDownTxManager() {
+        // TODO Auto-generated method stub
+
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 36f7804..5b1a837 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -8,6 +8,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.twill.zookeeper.ZKClientService;
 import org.slf4j.Logger;
 
+import java.io.IOException;
 import java.sql.SQLException;
 import java.util.concurrent.TimeoutException;
 
@@ -164,4 +165,19 @@ public interface PhoenixTransactionContext {
      * @return the family delete marker
      */
     public byte[] get_famility_delete_marker(); 
+
+    /**
+     * Setup transaction manager's configuration for testing
+     */
+     public void setTxnConfigs(Configuration config, String tmpFolder, int defaultTxnTimeoutSeconds) throws IOException;
+
+    /**
+     * Setup transaction manager for testing
+     */
+    public void setupTxManager(Configuration config, String url) throws SQLException;
+
+    /**
+     * Tear down transaction manager for testing
+     */
+    public void tearDownTxManager();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 0334826..447ce0e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -35,7 +35,13 @@ import org.apache.tephra.util.TxUtils;
 import org.apache.tephra.visibility.FenceWait;
 import org.apache.tephra.visibility.VisibilityFence;
 import org.apache.tephra.zookeeper.TephraZKClientService;
+import org.apache.tephra.distributed.TransactionService;
+import org.apache.tephra.metrics.TxMetricsCollector;
+import org.apache.tephra.persist.HDFSTransactionStateStorage;
+import org.apache.tephra.snapshot.SnapshotCodecProvider;
+import org.apache.twill.discovery.DiscoveryService;
 import org.apache.twill.discovery.ZKDiscoveryService;
+import org.apache.twill.internal.utils.Networks;
 import org.apache.twill.zookeeper.RetryStrategies;
 import org.apache.twill.zookeeper.ZKClientService;
 import org.apache.twill.zookeeper.ZKClientServices;
@@ -43,6 +49,7 @@ import org.apache.twill.zookeeper.ZKClients;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
+import com.google.inject.util.Providers;
 
 import org.slf4j.Logger;
 
@@ -51,6 +58,9 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     private static final TransactionCodec CODEC = new TransactionCodec();
 
     private static TransactionSystemClient txClient = null;
+    private static ZKClientService zkClient = null;
+    private static TransactionService txService = null;
+    private static TransactionManager txManager = null;
 
     private final List<TransactionAware> txAwares;
     private final TransactionContext txContext;
@@ -410,6 +420,60 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         return TxConstants.FAMILY_DELETE_QUALIFIER;
     }
 
+    @Override
+    public void setTxnConfigs(Configuration config, String tmpFolder, int defaultTxnTimeoutSeconds) throws IOException {
+        config.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
+        config.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
+        config.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);
+        config.setInt(TxConstants.Service.CFG_DATA_TX_BIND_PORT, Networks.getRandomPort());
+        config.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder);
+        config.setInt(TxConstants.Manager.CFG_TX_TIMEOUT, defaultTxnTimeoutSeconds);
+        config.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
+        config.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L);
+    }
+
+    @Override
+    public void setupTxManager(Configuration config, String url) throws SQLException {
+
+        if (txService != null) {
+            return;
+        }
+
+        ConnectionInfo connInfo = ConnectionInfo.create(url);
+        zkClient = ZKClientServices.delegate(
+          ZKClients.reWatchOnExpire(
+            ZKClients.retryOnFailure(
+              ZKClientService.Builder.of(connInfo.getZookeeperConnectionString())
+                .setSessionTimeout(config.getInt(HConstants.ZK_SESSION_TIMEOUT,
+                        HConstants.DEFAULT_ZK_SESSION_TIMEOUT))
+                .build(),
+              RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS)
+            )
+          )
+        );
+        zkClient.startAndWait();
+
+        DiscoveryService discovery = new ZKDiscoveryService(zkClient);
+        txManager = new TransactionManager(config, new HDFSTransactionStateStorage(config, new SnapshotCodecProvider(config), new TxMetricsCollector()), new TxMetricsCollector());
+        txService = new TransactionService(config, zkClient, discovery, Providers.of(txManager));
+        txService.startAndWait();
+    }
+
+    @Override
+    public void tearDownTxManager() {
+        try {
+            if (txService != null) txService.stopAndWait();
+        } finally {
+            try {
+                if (zkClient != null) zkClient.stopAndWait();
+            } finally {
+                txService = null;
+                zkClient = null;
+                txManager = null;
+            }
+        }
+    }
+
     /**
      * TephraTransactionContext specific functions
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index e33a280..49753f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.tephra.TxConstants;
 import org.apache.tephra.hbase.TransactionAwareHTable;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
 
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
@@ -38,18 +40,22 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     private TephraTransactionContext tephraTransactionContext;
 
     public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
-        this(ctx, hTable, false);
+        this(ctx, hTable, null);
     }
 
-    public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable, boolean isImmutableRows) {
+    public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable, PTable pTable) {
 
         assert(ctx instanceof TephraTransactionContext);
 
         tephraTransactionContext = (TephraTransactionContext) ctx;
 
-        transactionAwareHTable = new TransactionAwareHTable(hTable, isImmutableRows ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
+        transactionAwareHTable = new TransactionAwareHTable(hTable, (pTable != null && pTable.isImmutableRows()) ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
 
         tephraTransactionContext.addTransactionAware(transactionAwareHTable);
+
+        if (pTable != null && pTable.getType() != PTableType.INDEX) {
+            tephraTransactionContext.markDMLFence(pTable);
+        }
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 0a55147..01b775e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -50,8 +50,8 @@ public class TransactionUtil {
         return serverTimeStamp / TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
     }
     
-    public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, HTableInterface htable, boolean isImmutableRows) {
-        return new TephraTransactionTable(phoenixTransactionContext, htable, isImmutableRows);
+    public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, HTableInterface htable, PTable pTable) {
+        return new TephraTransactionTable(phoenixTransactionContext, htable, pTable);
     }
     
     // we resolve transactional tables at the txn read pointer

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3a21368/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 078c1e8..ff1007d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -130,12 +130,12 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
-import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ConfigUtil;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -143,19 +143,6 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
-import org.apache.tephra.TransactionManager;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.distributed.TransactionService;
-import org.apache.tephra.metrics.TxMetricsCollector;
-import org.apache.tephra.persist.HDFSTransactionStateStorage;
-import org.apache.tephra.snapshot.SnapshotCodecProvider;
-import org.apache.twill.discovery.DiscoveryService;
-import org.apache.twill.discovery.ZKDiscoveryService;
-import org.apache.twill.internal.utils.Networks;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
@@ -165,7 +152,6 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.inject.util.Providers;
 
 /**
  * 
@@ -186,9 +172,6 @@ public abstract class BaseTest {
     private static final Map<String,String> tableDDLMap;
     private static final Logger logger = LoggerFactory.getLogger(BaseTest.class);
     protected static final int DEFAULT_TXN_TIMEOUT_SECONDS = 30;
-    private static ZKClientService zkClient;
-    private static TransactionService txService;
-    protected static TransactionManager txManager;
     @ClassRule
     public static TemporaryFolder tmpFolder = new TemporaryFolder();
     private static final int dropTableTimeout = 300; // 5 mins should be long enough.
@@ -437,50 +420,15 @@ public abstract class BaseTest {
     }
     
     private static void tearDownTxManager() throws SQLException {
-        try {
-            if (txService != null) txService.stopAndWait();
-        } finally {
-            try {
-                if (zkClient != null) zkClient.stopAndWait();
-            } finally {
-                txService = null;
-                zkClient = null;
-                txManager = null;
-            }
-        }
-        
+        TransactionFactory.getTransactionFactory().getTransactionContext().tearDownTxManager();
     }
-    
+
     protected static void setTxnConfigs() throws IOException {
-        config.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
-        config.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
-        config.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);
-        config.setInt(TxConstants.Service.CFG_DATA_TX_BIND_PORT, Networks.getRandomPort());
-        config.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());
-        config.setInt(TxConstants.Manager.CFG_TX_TIMEOUT, DEFAULT_TXN_TIMEOUT_SECONDS);
-        config.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
-        config.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5L);
+        TransactionFactory.getTransactionFactory().getTransactionContext().setTxnConfigs(config, tmpFolder.newFolder().getAbsolutePath(), DEFAULT_TXN_TIMEOUT_SECONDS);
     }
-    
-    protected static void setupTxManager() throws SQLException, IOException {
-        ConnectionInfo connInfo = ConnectionInfo.create(getUrl());
-        zkClient = ZKClientServices.delegate(
-          ZKClients.reWatchOnExpire(
-            ZKClients.retryOnFailure(
-              ZKClientService.Builder.of(connInfo.getZookeeperConnectionString())
-                .setSessionTimeout(config.getInt(HConstants.ZK_SESSION_TIMEOUT,
-                        HConstants.DEFAULT_ZK_SESSION_TIMEOUT))
-                .build(),
-              RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS)
-            )
-          )
-        );
-        zkClient.startAndWait();
 
-        DiscoveryService discovery = new ZKDiscoveryService(zkClient);
-        txManager = new TransactionManager(config, new HDFSTransactionStateStorage(config, new SnapshotCodecProvider(config), new TxMetricsCollector()), new TxMetricsCollector());
-        txService = new TransactionService(config, zkClient, discovery, Providers.of(txManager));
-        txService.startAndWait();
+    protected static void setupTxManager() throws SQLException, IOException {
+        TransactionFactory.getTransactionFactory().getTransactionContext().setupTxManager(config, getUrl());
     }
 
     private static String checkClusterInitialized(ReadOnlyProps serverProps) throws Exception {
@@ -499,9 +447,7 @@ public abstract class BaseTest {
     }
     
     private static void checkTxManagerInitialized(ReadOnlyProps clientProps) throws SQLException, IOException {
-        if (txService == null) {
-            setupTxManager();
-        }
+        setupTxManager();
     }
 
     /**


[07/46] phoenix git commit: extends PhoenixTransactionTable to inherit from HTableInterface and implements the needed methods in TephraTransactionTable

Posted by td...@apache.org.
extends PhoenixTransactionTable to inherit from HTableInterface and implements the needed methods in TephraTransactionTable


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d2c16533
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d2c16533
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d2c16533

Branch: refs/heads/omid
Commit: d2c1653309fa97974b1c8bce3352cfc54d180567
Parents: c451343
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue Mar 7 11:57:42 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue Mar 7 11:57:42 2017 +0200

----------------------------------------------------------------------
 .../transaction/OmidTransactionTable.java       | 196 ++++++++++++++++++-
 .../transaction/PhoenixTransactionalTable.java  |   2 +-
 .../transaction/TephraTransactionTable.java     | 173 ++++++++++++++++
 3 files changed, 365 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2c16533/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 725fe16..d2cd020 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -2,16 +2,32 @@ package org.apache.phoenix.transaction;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
 
 public class OmidTransactionTable implements PhoenixTransactionalTable {
 
@@ -89,19 +105,16 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     @Override
     public void put(List<Put> puts) throws IOException {
         // TODO Auto-generated method stub
-
     }
 
     @Override
     public void delete(List<Delete> deletes) throws IOException {
         // TODO Auto-generated method stub
-
     }
 
     @Override
     public void setAutoFlush(boolean autoFlush) {
         // TODO Auto-generated method stub
-
     }
 
     @Override
@@ -119,19 +132,192 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     @Override
     public void setWriteBufferSize(long writeBufferSize) throws IOException {
         // TODO Auto-generated method stub
-
     }
 
     @Override
     public void flushCommits() throws IOException {
         // TODO Auto-generated method stub
-
     }
 
     @Override
     public void close() throws IOException {
         // TODO Auto-generated method stub
+    }
+
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount, boolean writeToWAL)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public Boolean[] exists(List<Get> gets) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public void setAutoFlushTo(boolean autoFlush) {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public TableName getName() {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean[] existsAll(List<Get> gets) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public void batch(List<? extends Row> actions, Object[] results)
+            throws IOException, InterruptedException {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public Object[] batch(List<? extends Row> actions) throws IOException,
+            InterruptedException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public <R> void batchCallback(List<? extends Row> actions,
+            Object[] results, Callback<R> callback) throws IOException,
+            InterruptedException {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public <R> Object[] batchCallback(List<? extends Row> actions,
+            Callback<R> callback) throws IOException, InterruptedException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+            byte[] value, Put put) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, Put put) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+            byte[] value, Delete delete) throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, Delete delete)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return false;
+    }
+
+    @Override
+    public void mutateRow(RowMutations rm) throws IOException {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public Result append(Append append) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public Result increment(Increment increment) throws IOException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount) throws IOException {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount, Durability durability)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return 0;
+    }
 
+    @Override
+    public CoprocessorRpcChannel coprocessorService(byte[] row) {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public <T extends Service, R> Map<byte[], R> coprocessorService(
+            Class<T> service, byte[] startKey, byte[] endKey,
+            Call<T, R> callable) throws ServiceException, Throwable {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public <T extends Service, R> void coprocessorService(Class<T> service,
+            byte[] startKey, byte[] endKey, Call<T, R> callable,
+            Callback<R> callback) throws ServiceException, Throwable {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public <R extends Message> Map<byte[], R> batchCoprocessorService(
+            MethodDescriptor methodDescriptor, Message request,
+            byte[] startKey, byte[] endKey, R responsePrototype)
+            throws ServiceException, Throwable {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public <R extends Message> void batchCoprocessorService(
+            MethodDescriptor methodDescriptor, Message request,
+            byte[] startKey, byte[] endKey, R responsePrototype,
+            Callback<R> callback) throws ServiceException, Throwable {
+        // TODO Auto-generated method stub
+    }
+
+    @Override
+    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, RowMutations mutation)
+            throws IOException {
+        // TODO Auto-generated method stub
+        return false;
     }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2c16533/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index 3a43068..dcab73d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -14,7 +14,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import java.io.IOException;
 import java.util.List;
 
-public interface PhoenixTransactionalTable {
+public interface PhoenixTransactionalTable extends HTableInterface {
 
     /**
      * Transaction version of {@link HTableInterface#get(Get get)}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2c16533/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index 0823f89..50ea600 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -2,18 +2,34 @@ package org.apache.phoenix.transaction;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.tephra.hbase.TransactionAwareHTable;
 
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+
 public class TephraTransactionTable implements PhoenixTransactionalTable {
 
     private TransactionAwareHTable transactionAwareHTable;
@@ -127,4 +143,161 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
         transactionAwareHTable.close();
     }
 
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount, boolean writeToWAL)
+            throws IOException {
+        return transactionAwareHTable.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
+    }
+
+    @Override
+    public Boolean[] exists(List<Get> gets) throws IOException {
+        return transactionAwareHTable.exists(gets);
+    }
+
+    @Override
+    public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
+        transactionAwareHTable.setAutoFlush(autoFlush, clearBufferOnFail);
+    }
+
+    @Override
+    public void setAutoFlushTo(boolean autoFlush) {
+        transactionAwareHTable.setAutoFlush(autoFlush);
+    }
+
+    @Override
+    public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
+        return transactionAwareHTable.getRowOrBefore(row, family);
+    }
+
+    @Override
+    public TableName getName() {
+        return transactionAwareHTable.getName();
+    }
+
+    @Override
+    public boolean[] existsAll(List<Get> gets) throws IOException {
+        return transactionAwareHTable.existsAll(gets);
+    }
+
+    @Override
+    public void batch(List<? extends Row> actions, Object[] results)
+            throws IOException, InterruptedException {
+        transactionAwareHTable.batch(actions, results);
+    }
+
+    @Override
+    public Object[] batch(List<? extends Row> actions) throws IOException,
+            InterruptedException {
+        return transactionAwareHTable.batch(actions);
+    }
+
+    @Override
+    public <R> void batchCallback(List<? extends Row> actions,
+            Object[] results, Callback<R> callback) throws IOException,
+            InterruptedException {
+        transactionAwareHTable.batchCallback(actions, results, callback);
+    }
+
+    @Override
+    public <R> Object[] batchCallback(List<? extends Row> actions,
+            Callback<R> callback) throws IOException, InterruptedException {
+        return transactionAwareHTable.batchCallback(actions, callback);
+    }
+
+    @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+            byte[] value, Put put) throws IOException {
+        return transactionAwareHTable.checkAndPut(row, family, qualifier, value, put);
+    }
+
+    @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, Put put) throws IOException {
+        return transactionAwareHTable.checkAndPut(row, family, qualifier, compareOp, value, put);
+    }
+
+    @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+            byte[] value, Delete delete) throws IOException {
+        return transactionAwareHTable.checkAndDelete(row, family, qualifier, value, delete);
+    }
+
+    @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, Delete delete)
+            throws IOException {
+        return transactionAwareHTable.checkAndDelete(row, family, qualifier, compareOp, value, delete);
+    }
+
+    @Override
+    public void mutateRow(RowMutations rm) throws IOException {
+        transactionAwareHTable.mutateRow(rm);
+    }
+
+    @Override
+    public Result append(Append append) throws IOException {
+        return transactionAwareHTable.append(append);
+    }
+
+    @Override
+    public Result increment(Increment increment) throws IOException {
+        return transactionAwareHTable.increment(increment);
+    }
+
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount) throws IOException {
+        return transactionAwareHTable.incrementColumnValue(row, family, qualifier, amount);
+    }
+
+    @Override
+    public long incrementColumnValue(byte[] row, byte[] family,
+            byte[] qualifier, long amount, Durability durability)
+            throws IOException {
+        return transactionAwareHTable.incrementColumnValue(row, family, qualifier, amount, durability);
+    }
+
+    @Override
+    public CoprocessorRpcChannel coprocessorService(byte[] row) {
+        return transactionAwareHTable.coprocessorService(row);
+    }
+
+    @Override
+    public <T extends Service, R> Map<byte[], R> coprocessorService(
+            Class<T> service, byte[] startKey, byte[] endKey,
+            Call<T, R> callable) throws ServiceException, Throwable {
+        return transactionAwareHTable.coprocessorService(service, startKey, endKey, callable);
+    }
+
+    @Override
+    public <T extends Service, R> void coprocessorService(Class<T> service,
+            byte[] startKey, byte[] endKey, Call<T, R> callable,
+            Callback<R> callback) throws ServiceException, Throwable {
+        transactionAwareHTable.coprocessorService(service, startKey, endKey, callable, callback);
+    }
+
+    @Override
+    public <R extends Message> Map<byte[], R> batchCoprocessorService(
+            MethodDescriptor methodDescriptor, Message request,
+            byte[] startKey, byte[] endKey, R responsePrototype)
+            throws ServiceException, Throwable {
+        return transactionAwareHTable.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
+    }
+
+    @Override
+    public <R extends Message> void batchCoprocessorService(
+            MethodDescriptor methodDescriptor, Message request,
+            byte[] startKey, byte[] endKey, R responsePrototype,
+            Callback<R> callback) throws ServiceException, Throwable {
+        transactionAwareHTable.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
+    }
+
+    @Override
+    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+            CompareOp compareOp, byte[] value, RowMutations mutation)
+            throws IOException {
+        return transactionAwareHTable.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
+    }
+
 }


[37/46] phoenix git commit: Merge remote-tracking branch 'upstream/master'

Posted by td...@apache.org.
Merge remote-tracking branch 'upstream/master'


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9f38170a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9f38170a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9f38170a

Branch: refs/heads/omid
Commit: 9f38170a41eb1fe2cc49d42b4c99aff9b05d5710
Parents: 6f7d42f f51c0db
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon May 8 14:33:07 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon May 8 14:33:07 2017 +0300

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |   2 +-
 phoenix-client/pom.xml                          |   2 +-
 phoenix-core/pom.xml                            |   2 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |   2 +-
 .../apache/phoenix/end2end/AlterTableIT.java    |   9 +-
 .../org/apache/phoenix/end2end/ArrayIT.java     |   2 +-
 .../phoenix/end2end/AutomaticRebuildIT.java     |   4 +-
 .../phoenix/end2end/CoalesceFunctionIT.java     |  12 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |   2 +-
 .../end2end/IndexToolForPartialBuildIT.java     |   4 +-
 .../phoenix/end2end/NthValueFunctionIT.java     |  99 +++++++
 .../end2end/SequenceBulkAllocationIT.java       |  44 ++--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  44 ++--
 .../phoenix/end2end/SortMergeJoinMoreIT.java    | 135 ++++++++++
 .../phoenix/end2end/SpillableGroupByIT.java     |   2 +-
 .../phoenix/end2end/StatsCollectorIT.java       |  11 +-
 .../phoenix/end2end/StoreNullsPropIT.java       |   2 +-
 .../end2end/SystemTablePermissionsIT.java       | 263 +++++++++++++++++++
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  39 ++-
 .../apache/phoenix/end2end/index/IndexIT.java   |   5 +-
 .../phoenix/end2end/index/IndexMetadataIT.java  |   6 +-
 .../phoenix/end2end/index/IndexTestUtil.java    |   2 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |  47 +++-
 .../org/apache/phoenix/rpc/UpdateCacheIT.java   |   2 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  |   8 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   |  10 +
 .../apache/phoenix/compile/JoinCompiler.java    |   6 +-
 .../phoenix/compile/SubselectRewriter.java      | 137 +++++++++-
 .../apache/phoenix/compile/UpsertCompiler.java  |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java       | 213 +++++++--------
 .../phoenix/coprocessor/MetaDataProtocol.java   |   4 +-
 .../phoenix/exception/SQLExceptionCode.java     |   1 +
 .../apache/phoenix/execute/DelegateHTable.java  |  22 +-
 .../phoenix/execute/DelegateQueryPlan.java      |   4 +
 .../phoenix/execute/SortMergeJoinPlan.java      |   8 +
 .../FirstLastValueServerAggregator.java         |   2 -
 .../org/apache/phoenix/hbase/index/Indexer.java |  66 ++---
 .../write/ParallelWriterIndexCommitter.java     |   8 +-
 .../TrackingParallelWriterIndexCommitter.java   |   8 +-
 .../apache/phoenix/index/IndexMaintainer.java   |   3 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   7 +-
 .../query/ConnectionQueryServicesImpl.java      |  60 ++++-
 .../apache/phoenix/schema/MetaDataClient.java   |  24 +-
 .../java/org/apache/phoenix/schema/PTable.java  |  10 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  15 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  17 ++
 .../org/apache/phoenix/util/RepairUtil.java     |  11 +-
 .../org/apache/phoenix/util/UpgradeUtil.java    |  75 +++++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |   2 +-
 .../phoenix/compile/QueryCompilerTest.java      | 187 +++++++++++++
 .../recovery/TestPerRegionIndexWriteCache.java  |   2 +-
 .../query/ConnectionQueryServicesImplTest.java  |  73 +++++
 phoenix-flume/pom.xml                           |   2 +-
 phoenix-hive/pom.xml                            |   2 +-
 phoenix-kafka/pom.xml                           |   2 +-
 phoenix-pherf/pom.xml                           |   2 +-
 phoenix-pig/pom.xml                             |   2 +-
 phoenix-queryserver-client/pom.xml              |   2 +-
 phoenix-queryserver/pom.xml                     |   2 +-
 phoenix-server/pom.xml                          |   2 +-
 phoenix-spark/pom.xml                           |   2 +-
 phoenix-spark/src/it/resources/globalSetup.sql  |   3 +-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |  27 +-
 .../phoenix/spark/DataFrameFunctions.scala      |  19 +-
 .../apache/phoenix/spark/DefaultSource.scala    |   2 +-
 .../org/apache/phoenix/spark/PhoenixRDD.scala   |   4 +-
 phoenix-tracing-webapp/pom.xml                  |   2 +-
 pom.xml                                         |   6 +-
 68 files changed, 1484 insertions(+), 324 deletions(-)
----------------------------------------------------------------------



[16/46] phoenix git commit: PHOENIX-3756 Handle users lacking ADMIN for the SYSTEM namespace

Posted by td...@apache.org.
PHOENIX-3756 Handle users lacking ADMIN for the SYSTEM namespace


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b3cc71e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b3cc71e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b3cc71e

Branch: refs/heads/omid
Commit: 8b3cc71eb9ae5972516435629591dd2ab94df50d
Parents: 2c53fc9
Author: Josh Elser <el...@apache.org>
Authored: Thu Mar 30 15:13:57 2017 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Wed Apr 5 17:48:20 2017 -0400

----------------------------------------------------------------------
 .../end2end/SystemTablePermissionsIT.java       | 263 +++++++++++++++++++
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   3 +
 .../query/ConnectionQueryServicesImpl.java      |  56 +++-
 .../query/ConnectionQueryServicesImplTest.java  |  73 +++++
 4 files changed, 383 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b3cc71e/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
new file mode 100644
index 0000000..9f213c8
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.QueryServices;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test that verifies a user can read Phoenix tables with a minimal set of permissions.
+ */
+public class SystemTablePermissionsIT {
+    private static String SUPERUSER;
+
+    private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
+            "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
+                "SYSTEM.MUTEX"));
+    // PHOENIX-XXXX SYSTEM.MUTEX isn't being created in the SYSTEM namespace as it should be.
+    private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(
+            Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION",
+                "SYSTEM.MUTEX"));
+
+    private static final String TABLE_NAME =
+        SystemTablePermissionsIT.class.getSimpleName().toUpperCase();
+    private static final int NUM_RECORDS = 5;
+
+    private HBaseTestingUtility testUtil = null;
+    private Properties clientProperties = null;
+
+    @BeforeClass
+    public static void setup() throws Exception {
+        SUPERUSER = System.getProperty("user.name");
+    }
+
+    private static void setCommonConfigProperties(Configuration conf) {
+        conf.set("hbase.coprocessor.master.classes",
+            "org.apache.hadoop.hbase.security.access.AccessController");
+        conf.set("hbase.coprocessor.region.classes",
+            "org.apache.hadoop.hbase.security.access.AccessController");
+        conf.set("hbase.coprocessor.regionserver.classes",
+            "org.apache.hadoop.hbase.security.access.AccessController");
+        conf.set("hbase.security.exec.permission.checks", "true");
+        conf.set("hbase.security.authorization", "true");
+        conf.set("hbase.superuser", SUPERUSER);
+    }
+
+    @After
+    public void cleanup() throws Exception {
+        if (null != testUtil) {
+          testUtil.shutdownMiniCluster();
+          testUtil = null;
+        }
+    }
+
+    @Test
+    public void testSystemTablePermissions() throws Exception {
+        testUtil = new HBaseTestingUtility();
+        clientProperties = new Properties();
+        Configuration conf = testUtil.getConfiguration();
+        setCommonConfigProperties(conf);
+        conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "false");
+        clientProperties.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "false");
+        testUtil.startMiniCluster(1);
+        final UserGroupInformation superUser = UserGroupInformation.createUserForTesting(
+            SUPERUSER, new String[0]);
+        final UserGroupInformation regularUser = UserGroupInformation.createUserForTesting(
+            "user", new String[0]);
+
+        superUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                createTable();
+                readTable();
+                return null;
+            }
+        });
+
+        Set<String> tables = getHBaseTables();
+        assertTrue("HBase tables do not include expected Phoenix tables: " + tables,
+            tables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+        // Grant permission to the system tables for the unprivileged user
+        superUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                try {
+                    grantPermissions(regularUser.getShortUserName(), PHOENIX_SYSTEM_TABLES,
+                        Action.EXEC, Action.READ);
+                    grantPermissions(regularUser.getShortUserName(),
+                        Collections.singleton(TABLE_NAME), Action.READ);
+                } catch (Throwable e) {
+                    if (e instanceof Exception) {
+                        throw (Exception) e;
+                    } else {
+                        throw new Exception(e);
+                    }
+                }
+                return null;
+            }
+        });
+
+        // Make sure that the unprivileged user can read the table
+        regularUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                // We expect this to not throw an error
+                readTable();
+                return null;
+            }
+        });
+    }
+
+    @Test
+    public void testNamespaceMappedSystemTables() throws Exception {
+        testUtil = new HBaseTestingUtility();
+        clientProperties = new Properties();
+        Configuration conf = testUtil.getConfiguration();
+        setCommonConfigProperties(conf);
+        testUtil.getConfiguration().set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+        clientProperties.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+        testUtil.startMiniCluster(1);
+        final UserGroupInformation superUser =
+            UserGroupInformation.createUserForTesting(SUPERUSER, new String[0]);
+        final UserGroupInformation regularUser =
+            UserGroupInformation.createUserForTesting("user", new String[0]);
+
+        superUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                createTable();
+                readTable();
+                return null;
+            }
+        });
+
+        Set<String> tables = getHBaseTables();
+        assertTrue("HBase tables do not include expected Phoenix tables: " + tables,
+            tables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+
+        // Grant permission to the system tables for the unprivileged user
+        // An unprivileged user should only need to be able to Read and eXecute on them.
+        superUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                try {
+                    grantPermissions(regularUser.getShortUserName(),
+                        PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, Action.EXEC, Action.READ);
+                    grantPermissions(regularUser.getShortUserName(),
+                        Collections.singleton(TABLE_NAME), Action.READ);
+                } catch (Throwable e) {
+                    if (e instanceof Exception) {
+                        throw (Exception) e;
+                    } else {
+                        throw new Exception(e);
+                    }
+                }
+                return null;
+            }
+        });
+
+        regularUser.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                // We expect this to not throw an error
+                readTable();
+                return null;
+            }
+        });
+    }
+
+    private String getJdbcUrl() {
+        return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
+    }
+
+    private void createTable() throws SQLException {
+        try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProperties);
+            Statement stmt = conn.createStatement();) {
+            assertFalse(stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME));
+            assertFalse(stmt.execute("CREATE TABLE " + TABLE_NAME
+                + "(pk INTEGER not null primary key, data VARCHAR)"));
+            try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO "
+                + TABLE_NAME + " values(?, ?)")) {
+                for (int i = 0; i < NUM_RECORDS; i++) {
+                    pstmt.setInt(1, i);
+                    pstmt.setString(2, Integer.toString(i));
+                    assertEquals(1, pstmt.executeUpdate());
+                }
+            }
+            conn.commit();
+        }
+    }
+
+    private void readTable() throws SQLException {
+        try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProperties);
+            Statement stmt = conn.createStatement()) {
+            ResultSet rs = stmt.executeQuery("SELECT pk, data FROM " + TABLE_NAME);
+            assertNotNull(rs);
+            int i = 0;
+            while (rs.next()) {
+                assertEquals(i, rs.getInt(1));
+                assertEquals(Integer.toString(i), rs.getString(2));
+                i++;
+            }
+            assertEquals(NUM_RECORDS, i);
+        }
+    }
+
+    private void grantPermissions(String toUser, Set<String> tablesToGrant, Action... actions)
+            throws Throwable {
+          for (String table : tablesToGrant) {
+              AccessControlClient.grant(testUtil.getConnection(), TableName.valueOf(table), toUser,
+                  null, null, actions);
+          }
+    }
+
+    private Set<String> getHBaseTables() throws IOException {
+        Set<String> tables = new HashSet<>();
+        for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
+            tables.add(tn.getNameAsString());
+        }
+        return tables;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b3cc71e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index e3a206c..e061406 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -30,6 +30,7 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.ColumnProjector;
@@ -97,6 +98,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final String SYSTEM_CATALOG = SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"";
     public static final String SYSTEM_CATALOG_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA,
             SYSTEM_CATALOG_TABLE);
+    public static final TableName SYSTEM_CATALOG_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_CATALOG_NAME);
     public static final byte[] SYSTEM_CATALOG_NAME_BYTES = Bytes.toBytes(SYSTEM_CATALOG_NAME);
     public static final String SYSTEM_STATS_TABLE = "STATS";
     public static final String SYSTEM_STATS_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_STATS_TABLE);
@@ -305,6 +307,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
 
     public static final String SYSTEM_MUTEX_TABLE_NAME = "MUTEX";
     public static final String SYSTEM_MUTEX_NAME = SchemaUtil.getTableName(QueryConstants.SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME);
+    public static final TableName SYSTEM_MUTEX_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_MUTEX_NAME);
     public static final byte[] SYSTEM_MUTEX_NAME_BYTES = Bytes.toBytes(SYSTEM_MUTEX_NAME);
     public static final byte[] SYSTEM_MUTEX_FAMILY_NAME_BYTES = TABLE_FAMILY_BYTES;
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b3cc71e/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index ee9f3d0..b402274 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -121,6 +121,7 @@ import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -253,6 +254,7 @@ import com.google.common.base.Throwables;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -1014,7 +1016,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 QueryServicesOptions.DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE);
     }
 
-    private NamespaceDescriptor ensureNamespaceCreated(String schemaName) throws SQLException {
+    void ensureNamespaceCreated(String schemaName) throws SQLException {
         SQLException sqlE = null;
         try (HBaseAdmin admin = getAdmin()) {
             NamespaceDescriptor namespaceDescriptor = null;
@@ -1027,13 +1029,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 namespaceDescriptor = NamespaceDescriptor.create(schemaName).build();
                 admin.createNamespace(namespaceDescriptor);
             }
-            return namespaceDescriptor;
+            return;
         } catch (IOException e) {
             sqlE = ServerUtil.parseServerException(e);
         } finally {
             if (sqlE != null) { throw sqlE; }
         }
-        return null; // will never make it here
     }
 
     /**
@@ -2445,6 +2446,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) {
                                         ConnectionQueryServicesImpl.this.upgradeRequired.set(true);
                                     }
+                                } catch (PhoenixIOException e) {
+                                    if (!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class))) {
+                                        // Pass
+                                        logger.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
+                                        checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName());
+                                        success = true;
+                                    } else {
+                                        initializationException = e;
+                                    }
+                                    return null;
                                 }
                                 if (!ConnectionQueryServicesImpl.this.upgradeRequired.get()) {
                                     createOtherSystemTables(metaConnection);
@@ -2497,8 +2508,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     
     private void createSysMutexTable(HBaseAdmin admin) throws IOException, SQLException {
         try {
-            HTableDescriptor tableDesc = new HTableDescriptor(
-                    TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES));
+            final TableName mutexTableName = TableName.valueOf(
+                PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES);
+            List<TableName> systemTables = getSystemTableNames(admin);
+            if (systemTables.contains(mutexTableName)) {
+                logger.debug("System mutex table already appears to exist, not creating it");
+                return;
+            }
+            HTableDescriptor tableDesc = new HTableDescriptor(mutexTableName);
             HColumnDescriptor columnDesc = new HColumnDescriptor(
                     PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES);
             columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
@@ -2516,6 +2533,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         }
     }
 
+    List<TableName> getSystemTableNames(HBaseAdmin admin) throws IOException {
+        return Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
+    }
+
     private void createOtherSystemTables(PhoenixConnection metaConnection) throws SQLException {
         try {
             metaConnection.createStatement().execute(QueryConstants.CREATE_SEQUENCE_METADATA);
@@ -3081,23 +3102,34 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             }
         }
     }
-    
-    private void ensureSystemTablesUpgraded(ReadOnlyProps props)
+
+    void ensureSystemTablesUpgraded(ReadOnlyProps props)
             throws SQLException, IOException, IllegalArgumentException, InterruptedException {
         if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) { return; }
         HTableInterface metatable = null;
         try (HBaseAdmin admin = getAdmin()) {
-            ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
+            // Namespace-mapping is enabled at this point.
+            try {
+                ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
+            } catch (PhoenixIOException e) {
+                // We could either:
+                // 1) Not access the NS descriptor. The NS may or may not exist at this point.
+                // 2) We could not create the NS
+                // Regardless of the case 1 or 2, if the NS does not exist, we will error expectedly
+                // below. If the NS does exist and is mapped, the below check will exit gracefully.
+            }
             
-            List<TableName> tableNames = Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
+            List<TableName> tableNames = getSystemTableNames(admin);
+            // No tables exist matching "SYSTEM\..*", they are all already in "SYSTEM:.*"
             if (tableNames.size() == 0) { return; }
+            // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
             if (tableNames.size() > 5) {
                 logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
             }
             byte[] mappedSystemTable = SchemaUtil
                     .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props).getName();
             metatable = getTable(mappedSystemTable);
-            if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)) {
+            if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
                 if (!admin.tableExists(mappedSystemTable)) {
                     UpgradeUtil.mapTableToNamespace(admin, metatable,
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props, null, PTableType.SYSTEM,
@@ -3106,9 +3138,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null,
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
                 }
-                tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+                tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
             }
-            tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME);
+            tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME);
             for (TableName table : tableNames) {
                 UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM,
                         null);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b3cc71e/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
new file mode 100644
index 0000000..73ddd2d
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.query;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.phoenix.exception.PhoenixIOException;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Test;
+
+public class ConnectionQueryServicesImplTest {
+    private static final PhoenixIOException PHOENIX_IO_EXCEPTION = new PhoenixIOException(new Exception("Test exception"));
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testExceptionHandlingOnSystemNamespaceCreation() throws Exception {
+        ConnectionQueryServicesImpl cqs = mock(ConnectionQueryServicesImpl.class);
+        // Invoke the real methods for these two calls
+        when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
+        doCallRealMethod().when(cqs).ensureSystemTablesUpgraded(any(ReadOnlyProps.class));
+
+        // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast.
+        when(cqs.getSystemTableNames(any(HBaseAdmin.class))).thenReturn(Collections.<TableName> emptyList());
+
+        // Throw a special exception to check on later
+        doThrow(PHOENIX_IO_EXCEPTION).when(cqs).ensureNamespaceCreated(anyString());
+
+        // Make sure that ensureSystemTablesUpgraded will try to migrate the system tables.
+        Map<String,String> props = new HashMap<>();
+        props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+        cqs.ensureSystemTablesUpgraded(new ReadOnlyProps(props));
+
+        // Should be called after upgradeSystemTables()
+        // Proves that execution proceeded
+        verify(cqs).getSystemTableNames(any(HBaseAdmin.class));
+
+        try {
+            // Verifies that the exception is propagated back to the caller
+            cqs.createSchema(Collections.<Mutation> emptyList(), "");
+        } catch (PhoenixIOException e) {
+            assertEquals(PHOENIX_IO_EXCEPTION, e);
+        }
+    }
+}


[12/46] phoenix git commit: PHOENIX-3693 Upgrade to Tephra 0.11.0

Posted by td...@apache.org.
PHOENIX-3693 Upgrade to Tephra 0.11.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7050b924
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7050b924
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7050b924

Branch: refs/heads/omid
Commit: 7050b924350655558e95c6f93246a47c76203174
Parents: 8093d10
Author: James Taylor <ja...@apache.org>
Authored: Tue Mar 21 13:34:54 2017 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Tue Mar 21 13:34:54 2017 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7050b924/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 1b74aa1..248a244 100644
--- a/pom.xml
+++ b/pom.xml
@@ -100,7 +100,7 @@
     <joni.version>2.1.2</joni.version>
     <avatica.version>1.9.0</avatica.version>
     <jettyVersion>8.1.7.v20120910</jettyVersion>
-    <tephra.version>0.9.0-incubating</tephra.version>
+    <tephra.version>0.11.0-incubating</tephra.version>
     <spark.version>2.0.2</spark.version>
     <scala.version>2.11.8</scala.version>
     <scala.binary.version>2.11</scala.binary.version>


[28/46] phoenix git commit: PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from DEBUG to TRACE

Posted by td...@apache.org.
PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from DEBUG to TRACE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/679ff21b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/679ff21b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/679ff21b

Branch: refs/heads/omid
Commit: 679ff21b78968a010db03c9428e60e7e00acb86e
Parents: 5bd7f79
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Apr 19 18:43:51 2017 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 20 18:20:08 2017 -0700

----------------------------------------------------------------------
 .../hbase/index/write/ParallelWriterIndexCommitter.java      | 8 ++++----
 .../write/recovery/TrackingParallelWriterIndexCommitter.java | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/679ff21b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 1549d26..7510c5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -143,8 +143,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                     // early exit, if that's the case
                     throwFailureIfDone();
 
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
+                    if (LOG.isTraceEnabled()) {
+                        LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                     }
                     HTableInterface table = null;
                     try {
@@ -158,8 +158,8 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                                 return null;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isDebugEnabled()) {
-                                    LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                if (LOG.isTraceEnabled()) {
+                                    LOG.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
                                             + ignord);
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/679ff21b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 4f1a076..074d0b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -159,15 +159,15 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                                 return Boolean.TRUE;
                             } catch (IOException ignord) {
                                 // when it's failed we fall back to the standard & slow way
-                                if (LOG.isDebugEnabled()) {
-                                    LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
+                                if (LOG.isTraceEnabled()) {
+                                    LOG.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
                                             + ignord);
                                 }
                             }
                         }
 
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
+                        if (LOG.isTraceEnabled()) {
+                            LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                         }
 
                         table = factory.getTable(tableReference.get());


[08/46] phoenix git commit: PHOENIX-3726 Addendum to use modifiable list

Posted by td...@apache.org.
PHOENIX-3726 Addendum to use modifiable list


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e34431c8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e34431c8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e34431c8

Branch: refs/heads/omid
Commit: e34431c8fd392b67ba5fda556627c96b6554bc11
Parents: 66a152a
Author: Samarth <sa...@salesforce.com>
Authored: Thu Mar 9 13:56:49 2017 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Mar 9 13:56:49 2017 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/query/ConnectionQueryServicesImpl.java     | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e34431c8/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 751f2f2..59252ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -3085,8 +3085,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (HBaseAdmin admin = getAdmin()) {
             ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
             
-             List<TableName> tableNames = Arrays
-                    .asList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
+            List<TableName> tableNames = Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
             if (tableNames.size() == 0) { return; }
             if (tableNames.size() > 5) {
                 logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);


[04/46] phoenix git commit: initial version of Tephra implementation

Posted by td...@apache.org.
initial version of Tephra implementation


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cea251cf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cea251cf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cea251cf

Branch: refs/heads/omid
Commit: cea251cfcb9699a90d10dfe82626c264b9016bc4
Parents: acfc9d5
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Tue Feb 14 15:57:23 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Tue Feb 14 15:57:23 2017 +0200

----------------------------------------------------------------------
 .../transaction/OmidTransactionContext.java     |   8 +-
 .../transaction/OmidTransactionTable.java       |   8 +-
 .../transaction/PhoenixTransactionContext.java  |  10 +-
 .../transaction/PhoenixTransactionalTable.java  |   5 -
 .../transaction/TephraTransactionContext.java   | 256 +++++++++++++++++--
 .../transaction/TephraTransactionTable.java     |  79 +++---
 6 files changed, 265 insertions(+), 101 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index bc5b05b..937ac14 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -26,13 +26,7 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void abort(SQLException e) throws SQLException {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public void checkpoint() throws SQLException {
+    public void checkpoint(boolean hasUncommittedData) throws SQLException {
         // TODO Auto-generated method stub
 
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index f15fdd3..725fe16 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -15,7 +15,7 @@ import org.apache.hadoop.hbase.client.Scan;
 
 public class OmidTransactionTable implements PhoenixTransactionalTable {
 
-    public OmidTransactionTable(PhoenixTransactionContext ctx) {
+    public OmidTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
         // TODO Auto-generated constructor stub
     }
 
@@ -99,12 +99,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     }
 
     @Override
-    public HTableInterface getHTable() {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
     public void setAutoFlush(boolean autoFlush) {
         // TODO Auto-generated method stub
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index f07640e..87b68f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -29,18 +29,10 @@ public interface PhoenixTransactionContext {
     public void abort() throws SQLException;
 
     /**
-     * Rollback a transaction
-     *
-     * @param e
-     * @throws SQLException
-     */
-    public void abort(SQLException e) throws SQLException;
-
-    /**
      * Create a checkpoint in a transaction as defined in [TEPHRA-96]
      * @throws SQLException
      */
-    public void checkpoint() throws SQLException;
+    public void checkpoint(boolean hasUncommittedData) throws SQLException;
 
     /**
      * Commit DDL to guarantee that no transaction started before create index

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index ff2632c..3a43068 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -101,11 +101,6 @@ public interface PhoenixTransactionalTable {
     public void delete(List<Delete> deletes) throws IOException;
 
     /**
-     * Return the underling htable
-     */
-    public HTableInterface getHTable();
-
-    /**
      * Delegates to {@link HTable#setAutoFlush(boolean autoFlush)}
      */
     public void setAutoFlush(boolean autoFlush);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 17c70f0..81c9fd1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -1,83 +1,285 @@
 package org.apache.phoenix.transaction;
 
 import java.sql.SQLException;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PTable;
+import org.apache.tephra.Transaction;
+import org.apache.tephra.TransactionAware;
+import org.apache.tephra.TransactionConflictException;
+import org.apache.tephra.TransactionContext;
+import org.apache.tephra.TransactionFailureException;
+import org.apache.tephra.TransactionSystemClient;
+import org.apache.tephra.Transaction.VisibilityLevel;
+import org.apache.tephra.visibility.FenceWait;
+import org.apache.tephra.visibility.VisibilityFence;
+
+import com.google.common.collect.Lists;
 
 public class TephraTransactionContext implements PhoenixTransactionContext {
 
-    @Override
-    public void begin() throws SQLException {
-        // TODO Auto-generated method stub
+    private final List<TransactionAware> txAwares;
+    private final TransactionContext txContext;
+    private Transaction tx;
+    private TransactionSystemClient txServiceClient;
+    private TransactionFailureException e;
 
-    }
+    public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean threadSafe) {
 
-    @Override
-    public void commit() throws SQLException {
-        // TODO Auto-generated method stub
+        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient(); // TODO Should be wrapped for Omid side usage
 
+        assert(ctx instanceof TephraTransactionContext);
+        TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
+
+        if (threadSafe) {
+            this.tx = tephraTransactionContext.getTransaction();
+            this.txAwares = Lists.newArrayList();
+            this.txContext = null;
+        } else {
+            this.txAwares = Collections.emptyList();
+            if (ctx == null) {
+                this.txContext = new TransactionContext(txServiceClient);
+            } else {
+                this.txContext = tephraTransactionContext.getContext();
+            }
+        }
+
+        this.e = null;
     }
 
     @Override
-    public void abort() throws SQLException {
-        // TODO Auto-generated method stub
+    public void begin() throws SQLException {
+        if (txContext == null) {
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
+        }
 
+        try {
+            txContext.start();
+        } catch (TransactionFailureException e) {
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
+            .setMessage(e.getMessage())
+            .setRootCause(e)
+            .build().buildException();
+        }
     }
 
     @Override
-    public void abort(SQLException e) throws SQLException {
-        // TODO Auto-generated method stub
-
+    public void commit() throws SQLException {
+        try {
+            assert(txContext != null);
+            txContext.finish();
+        } catch (TransactionFailureException e) {
+            this.e = e;
+            if (e instanceof TransactionConflictException) { 
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION)
+                    .setMessage(e.getMessage())
+                    .setRootCause(e)
+                    .build().buildException();
+            }
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
+                .setMessage(e.getMessage())
+                .setRootCause(e)
+                .build().buildException();
+        }
     }
 
     @Override
-    public void checkpoint() throws SQLException {
-        // TODO Auto-generated method stub
+    public void abort() throws SQLException {
+        try {
+            if (e != null) {
+                txContext.abort(e);
+                e = null;
+            } else {
+                txContext.abort();
+            }
+        } catch (TransactionFailureException e) {
+            this.e = null;
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED)
+                .setMessage(e.getMessage())
+                .setRootCause(e)
+                .build().buildException();
+        }
+    }
 
+    @Override
+    public void checkpoint(boolean hasUncommittedData) throws SQLException {
+        if (hasUncommittedData) {
+            try {
+                if (txContext == null) {
+                    tx = txServiceClient.checkpoint(tx);
+                }  else {
+                    assert(txContext != null);
+                    txContext.checkpoint();
+                    tx = txContext.getCurrentTransaction();
+                }
+            } catch (TransactionFailureException e) {
+                throw new SQLException(e);
+            }
+        }
+  
+        if (txContext == null) {
+            tx.setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
+        }
+        else {
+            assert(txContext != null);
+            txContext.getCurrentTransaction().setVisibility(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT);
+        }
     }
 
     @Override
     public void commitDDLFence(PTable dataTable) throws SQLException,
             InterruptedException, TimeoutException {
-        // TODO Auto-generated method stub
-
+        byte[] key = dataTable.getName().getBytes();
+        try {
+            FenceWait fenceWait = VisibilityFence.prepareWait(key, txServiceClient);
+            fenceWait.await(10000, TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
+        } catch (TimeoutException | TransactionFailureException e) {
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE)
+            .setSchemaName(dataTable.getSchemaName().getString())
+            .setTableName(dataTable.getTableName().getString())
+            .build().buildException();
+        }
     }
 
     @Override
     public void markDMLFence(PTable table) {
-        // TODO Auto-generated method stub
-
+        byte[] logicalKey = table.getName().getBytes();
+        TransactionAware logicalTxAware = VisibilityFence.create(logicalKey);
+        if (this.txContext == null) {
+            this.txAwares.add(logicalTxAware);
+        } else {
+            this.txContext.addTransactionAware(logicalTxAware);
+        }
+        byte[] physicalKey = table.getPhysicalName().getBytes();
+        if (Bytes.compareTo(physicalKey, logicalKey) != 0) {
+            TransactionAware physicalTxAware = VisibilityFence.create(physicalKey);
+            if (this.txContext == null) {
+                this.txAwares.add(physicalTxAware);
+            } else {
+                this.txContext.addTransactionAware(physicalTxAware);
+            }
+        }
     }
 
     @Override
     public void join(PhoenixTransactionContext ctx) {
-        // TODO Auto-generated method stub
+        assert(ctx instanceof TephraTransactionContext);
+        TephraTransactionContext tephraContext = (TephraTransactionContext) ctx;
+
+        tephraContext.getAwares();
 
+        if (txContext != null) {
+            for (TransactionAware txAware : tephraContext.getAwares()) {
+                txContext.addTransactionAware(txAware);
+            }
+        } else {
+            txAwares.addAll(tephraContext.getAwares());
+        }
     }
 
-       @Override
+    @Override
     public boolean isTransactionRunning() {
-        // TODO Auto-generated method stub
+        if (this.txContext != null) {
+            return (this.txContext.getCurrentTransaction() != null) ? true : false;
+        }
+
+        if (this.tx != null) {
+            return true;
+        }
+
         return false;
     }
 
     @Override
     public void reset() {
-        // TODO Auto-generated method stub
-
+        tx = null;
+        txAwares.clear();
     }
 
     @Override
     public long getTransactionId() {
-        // TODO Auto-generated method stub
-        return 0;
+        if (this.txContext != null) {
+            return txContext.getCurrentTransaction().getTransactionId();
+        } 
+
+        if (tx != null) {
+            return tx.getTransactionId();
+        }
+
+        return HConstants.LATEST_TIMESTAMP;
     }
 
     @Override
     public long getReadPointer() {
-        // TODO Auto-generated method stub
-        return 0;
+        if (this.txContext != null) {
+            return txContext.getCurrentTransaction().getReadPointer();
+        } 
+
+        if (tx != null) {
+            return tx.getReadPointer();
+        }
+
+        return (-1);
     }
 
+   /**
+    * TephraTransactionContext specific functions
+    */
+
+    Transaction getTransaction() {
+        return this.tx;
+    }
+
+    TransactionContext getContext() {
+        return this.txContext;
+    }
+
+    List<TransactionAware> getAwares() {
+        return txAwares;
+    }
+
+    void addTransactionAware(TransactionAware txAware) {
+        if (this.txContext != null) {
+            txContext.addTransactionAware(txAware);
+        } else if (this.tx != null) {
+            txAwares.add(txAware);
+        }
+    }
+
+    // For testing
+    public long getWritePointer() {
+        if (this.txContext != null) {
+            return txContext.getCurrentTransaction().getWritePointer();
+        } 
+
+        if (tx != null) {
+            return tx.getWritePointer();
+        }
+
+        return HConstants.LATEST_TIMESTAMP;
+    }
+
+    // For testing
+    public VisibilityLevel getVisibilityLevel() {
+        if (this.txContext != null) {
+            return txContext.getCurrentTransaction().getVisibilityLevel();
+        } 
+
+        if (tx != null) {
+            return tx.getVisibilityLevel();
+        }
+
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea251cf/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index 0d788c1..c5ba33f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -12,132 +12,119 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.tephra.hbase.TransactionAwareHTable;
 
 public class TephraTransactionTable implements PhoenixTransactionalTable {
 
-    public TephraTransactionTable(PhoenixTransactionContext ctx) {
-        // TODO Auto-generated constructor stub
+    private TransactionAwareHTable transactionAwareHTable;
+    
+    private TephraTransactionContext tephraTransactionContext;
+    
+    public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
+
+        assert(ctx instanceof TephraTransactionContext);
+
+        tephraTransactionContext = (TephraTransactionContext) ctx;
+
+        transactionAwareHTable = new TransactionAwareHTable(hTable);
+
+        tephraTransactionContext.addTransactionAware(transactionAwareHTable);
     }
 
     @Override
     public Result get(Get get) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.get(get);
     }
 
     @Override
     public void put(Put put) throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.put(put);
     }
 
     @Override
     public void delete(Delete delete) throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.delete(delete);
     }
 
     @Override
     public ResultScanner getScanner(Scan scan) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getScanner(scan);
     }
 
     @Override
     public byte[] getTableName() {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getTableName();
     }
 
     @Override
     public Configuration getConfiguration() {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getConfiguration();
     }
 
     @Override
     public HTableDescriptor getTableDescriptor() throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getTableDescriptor();
     }
 
     @Override
     public boolean exists(Get get) throws IOException {
-        // TODO Auto-generated method stub
-        return false;
+        return transactionAwareHTable.exists(get);
     }
 
     @Override
     public Result[] get(List<Get> gets) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.get(gets);
     }
 
     @Override
     public ResultScanner getScanner(byte[] family) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getScanner(family);
     }
 
     @Override
     public ResultScanner getScanner(byte[] family, byte[] qualifier)
             throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        return transactionAwareHTable.getScanner(family, qualifier);
     }
 
     @Override
     public void put(List<Put> puts) throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.put(puts);
     }
 
     @Override
     public void delete(List<Delete> deletes) throws IOException {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public HTableInterface getHTable() {
-        // TODO Auto-generated method stub
-        return null;
+        transactionAwareHTable.delete(deletes);
     }
 
     @Override
     public void setAutoFlush(boolean autoFlush) {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.setAutoFlush(autoFlush);
     }
 
     @Override
     public boolean isAutoFlush() {
-        // TODO Auto-generated method stub
-        return false;
+        return transactionAwareHTable.isAutoFlush();
     }
 
     @Override
     public long getWriteBufferSize() {
-        // TODO Auto-generated method stub
-        return 0;
+        return transactionAwareHTable.getWriteBufferSize();
     }
 
     @Override
     public void setWriteBufferSize(long writeBufferSize) throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.setWriteBufferSize(writeBufferSize);
     }
 
     @Override
     public void flushCommits() throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.flushCommits();
     }
 
     @Override
     public void close() throws IOException {
-        // TODO Auto-generated method stub
-
+        transactionAwareHTable.close();
     }
 
 }


[30/46] phoenix git commit: PHOENIX-3792 Provide way to skip normalization of column names in phoenix-spark integration

Posted by td...@apache.org.
PHOENIX-3792 Provide way to skip normalization of column names in phoenix-spark integration


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/90e32c01
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/90e32c01
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/90e32c01

Branch: refs/heads/omid
Commit: 90e32c015207b39330ed7496db7a73dbc7b634f4
Parents: 28af89c
Author: Ankit Singhal <an...@gmail.com>
Authored: Fri Apr 21 11:48:16 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Fri Apr 21 11:48:16 2017 +0530

----------------------------------------------------------------------
 phoenix-spark/src/it/resources/globalSetup.sql  |  1 +
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 27 ++++++++++++++++++--
 .../phoenix/spark/DataFrameFunctions.scala      | 19 +++++++++++---
 .../apache/phoenix/spark/DefaultSource.scala    |  2 +-
 4 files changed, 42 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/90e32c01/phoenix-spark/src/it/resources/globalSetup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/globalSetup.sql b/phoenix-spark/src/it/resources/globalSetup.sql
index dc24da7..7ac0039 100644
--- a/phoenix-spark/src/it/resources/globalSetup.sql
+++ b/phoenix-spark/src/it/resources/globalSetup.sql
@@ -17,6 +17,7 @@
 CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
 CREATE TABLE table1_copy (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
 CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
+CREATE TABLE table3 (id BIGINT NOT NULL PRIMARY KEY, table3_id BIGINT, "t2col1" VARCHAR)
 UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')
 UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (1, 1, 'test_child_1')
 UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (2, 1, 'test_child_2')

http://git-wip-us.apache.org/repos/asf/phoenix/blob/90e32c01/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index d53b5ee..b8e44fe 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -20,15 +20,38 @@ import org.apache.phoenix.util.{ColumnInfo, SchemaUtil}
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.{Row, SQLContext, SaveMode}
 import org.joda.time.DateTime
-
+import org.apache.spark.{SparkConf, SparkContext}
 import scala.collection.mutable.ListBuffer
-
+import org.apache.hadoop.conf.Configuration
 /**
   * Note: If running directly from an IDE, these are the recommended VM parameters:
   * -Xmx1536m -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m
   */
 class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 
+  test("Can persist data with case senstive columns (like in avro schema) using 'DataFrame.saveToPhoenix'") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.createDataFrame(
+      Seq(
+        (1, 1, "test_child_1"),
+        (2, 1, "test_child_2"))).toDF("ID", "TABLE3_ID", "t2col1")
+    df.saveToPhoenix("TABLE3", zkUrl = Some(quorumAddress),skipNormalizingIdentifier=true)
+
+    // Verify results
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT * FROM TABLE3")
+
+    val checkResults = List((1, 1, "test_child_1"), (2, 1, "test_child_2"))
+    val results = ListBuffer[(Long, Long, String)]()
+    while (rs.next()) {
+      results.append((rs.getLong(1), rs.getLong(2), rs.getString(3)))
+    }
+    stmt.close()
+
+    results.toList shouldEqual checkResults
+
+  }
+  
   test("Can convert Phoenix schema") {
     val phoenixSchema = List(
       new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/90e32c01/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index ddf4fab..92f4c58 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -24,13 +24,16 @@ import scala.collection.JavaConversions._
 
 
 class DataFrameFunctions(data: DataFrame) extends Serializable {
-
+  def saveToPhoenix(parameters: Map[String, String]): Unit = {
+  		saveToPhoenix(parameters("table"), zkUrl = parameters.get("zkUrl"), tenantId = parameters.get("TenantId"), 
+  		skipNormalizingIdentifier=parameters.contains("skipNormalizingIdentifier"))
+   }
   def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
-                    zkUrl: Option[String] = None, tenantId: Option[String] = None): Unit = {
-
+                    zkUrl: Option[String] = None, tenantId: Option[String] = None, skipNormalizingIdentifier: Boolean = false): Unit = {
 
     // Retrieve the schema field names and normalize to Phoenix, need to do this outside of mapPartitions
-    val fieldArray = data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+    val fieldArray = getFieldArray(skipNormalizingIdentifier, data)
+    
 
     // Create a configuration object to use for saving
     @transient val outConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, tenantId, Some(conf))
@@ -61,4 +64,12 @@ class DataFrameFunctions(data: DataFrame) extends Serializable {
       outConfig
     )
   }
+
+  def getFieldArray(skipNormalizingIdentifier: Boolean = false, data: DataFrame) = {
+    if (skipNormalizingIdentifier) {
+      data.schema.fieldNames.map(x => x)
+    } else {
+      data.schema.fieldNames.map(x => SchemaUtil.normalizeIdentifier(x))
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/90e32c01/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
index 743d196..e000b74 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
@@ -44,7 +44,7 @@ class DefaultSource extends RelationProvider with CreatableRelationProvider {
     verifyParameters(parameters)
 
     // Save the DataFrame to Phoenix
-    data.saveToPhoenix(parameters("table"), zkUrl = parameters.get("zkUrl"), tenantId = parameters.get("TenantId"))
+    data.saveToPhoenix(parameters)
 
     // Return a relation of the saved data
     createRelation(sqlContext, parameters)


[27/46] phoenix git commit: Apply Local Indexes batch updates only once.

Posted by td...@apache.org.
Apply Local Indexes batch updates only once.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5bd7f79b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5bd7f79b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5bd7f79b

Branch: refs/heads/omid
Commit: 5bd7f79b51309505a19f854d05cb000f5cd1eb9f
Parents: ee886ba
Author: Lars Hofhansl <la...@apache.org>
Authored: Wed Apr 19 17:41:00 2017 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Wed Apr 19 17:41:00 2017 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/LocalIndexIT.java     | 30 ++++++++
 .../org/apache/phoenix/hbase/index/Indexer.java | 73 +++++++-------------
 2 files changed, 56 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bd7f79b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index a7d0028..8d3316b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -617,6 +617,36 @@ public class LocalIndexIT extends BaseLocalIndexIT {
         }
     }
 
+    @Test
+    public void testLocalGlobalIndexMix() throws Exception {
+        if (isNamespaceMapped) { return; }
+        String tableName = generateUniqueName();
+        Connection conn1 = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" +
+                "k1 INTEGER NOT NULL,\n" +
+                "k2 INTEGER NOT NULL,\n" +
+                "k3 INTEGER,\n" +
+                "v1 VARCHAR,\n" +
+                "v2 VARCHAR,\n" +
+                "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2))\n";
+        conn1.createStatement().execute(ddl);
+        conn1.createStatement().execute("CREATE LOCAL INDEX LV1 ON " + tableName + "(v1)");
+        conn1.createStatement().execute("CREATE INDEX GV2 ON " + tableName + "(v2)");
+
+        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('b',1,2,4,'z','3')");
+        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('f',1,2,3,'a','0')");
+        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('j',2,4,2,'a','2')");
+        conn1.createStatement().execute("UPSERT INTO " + tableName + " values('q',3,1,1,'c','1')");
+        conn1.commit();
+        ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + tableName + " WHERE v1 = 'c'");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + tableName + " WHERE v2 = '2'");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        conn1.close();
+    }
+
     private void copyLocalIndexHFiles(Configuration conf, HRegionInfo fromRegion, HRegionInfo toRegion, boolean move)
             throws IOException {
         Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bd7f79b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index de98051..9fc76e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -372,7 +372,7 @@ public class Indexer extends BaseRegionObserver {
       super.postPut(e, put, edit, durability);
           return;
         }
-    doPost(edit, put, durability, true, false);
+    doPost(edit, put, durability);
   }
 
   @Override
@@ -382,29 +382,10 @@ public class Indexer extends BaseRegionObserver {
       super.postDelete(e, delete, edit, durability);
           return;
         }
-    doPost(edit, delete, durability, true, false);
+    doPost(edit, delete, durability);
   }
 
   @Override
-  public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
-      if (this.disabled) {
-        super.postBatchMutate(c, miniBatchOp);
-        return;
-      }
-      WALEdit edit = miniBatchOp.getWalEdit(0);
-      if (edit != null) {
-        IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
-        if (ikv != null) {
-          // This will prevent the postPut and postDelete hooks from doing anything
-          // We need to do this now, as the postBatchMutateIndispensably (where the
-          // actual index writing gets done) is called after the postPut and postDelete.
-          ikv.markBatchFinished();
-        }
-      }
-  }
-  
-  @Override
   public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
       MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success) throws IOException {
       if (this.disabled) {
@@ -417,17 +398,13 @@ public class Indexer extends BaseRegionObserver {
         //each batch operation, only the first one will have anything useful, so we can just grab that
         Mutation mutation = miniBatchOp.getOperation(0);
         WALEdit edit = miniBatchOp.getWalEdit(0);
-        // We're forcing the index writes here because we've marked the index batch as "finished"
-        // to prevent postPut and postDelete from doing anything, but hold off on writing them
-        // until now so we're outside of the MVCC lock (see PHOENIX-3789). Without this hacky
-        // forceWrite flag, we'd ignore them again here too.
-        doPost(edit, mutation, mutation.getDurability(), false, true);
+        doPost(edit, mutation, mutation.getDurability());
     }
   }
 
-  private void doPost(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates, boolean forceWrite) throws IOException {
+  private void doPost(WALEdit edit, Mutation m, final Durability durability) throws IOException {
     try {
-      doPostWithExceptions(edit, m, durability, allowLocalUpdates, forceWrite);
+      doPostWithExceptions(edit, m, durability);
       return;
     } catch (Throwable e) {
       rethrowIndexingException(e);
@@ -436,7 +413,7 @@ public class Indexer extends BaseRegionObserver {
         "Somehow didn't complete the index update, but didn't return succesfully either!");
   }
 
-  private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability, boolean allowLocalUpdates, boolean forceWrite)
+  private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability)
           throws Exception {
       //short circuit, if we don't need to do any work
       if (durability == Durability.SKIP_WAL || !this.builder.isEnabled(m) || edit == null) {
@@ -470,30 +447,32 @@ public class Indexer extends BaseRegionObserver {
            * once (this hook gets called with the same WALEdit for each Put/Delete in a batch, which can
            * lead to writing all the index updates for each Put/Delete).
            */
-          if ((!ikv.getBatchFinished() || forceWrite) || allowLocalUpdates) {
+          if (!ikv.getBatchFinished()) {
               Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
 
               // the WAL edit is kept in memory and we already specified the factory when we created the
               // references originally - therefore, we just pass in a null factory here and use the ones
               // already specified on each reference
               try {
-            	  if (!ikv.getBatchFinished() || forceWrite) {
-            		  current.addTimelineAnnotation("Actually doing index update for first time");
-            		  writer.writeAndKillYourselfOnFailure(indexUpdates, allowLocalUpdates);
-            	  } else if (allowLocalUpdates) {
-            		  Collection<Pair<Mutation, byte[]>> localUpdates =
-            				  new ArrayList<Pair<Mutation, byte[]>>();
-            		  current.addTimelineAnnotation("Actually doing local index update for first time");
-            		  for (Pair<Mutation, byte[]> mutation : indexUpdates) {
-            			  if (Bytes.toString(mutation.getSecond()).equals(
-            					  environment.getRegion().getTableDesc().getNameAsString())) {
-            				  localUpdates.add(mutation);
-            			  }
-            		  }
-                      if(!localUpdates.isEmpty()) {
-                    	  writer.writeAndKillYourselfOnFailure(localUpdates, allowLocalUpdates);
-                      }
-            	  }
+        		  current.addTimelineAnnotation("Actually doing index update for first time");
+                  Collection<Pair<Mutation, byte[]>> localUpdates =
+                          new ArrayList<Pair<Mutation, byte[]>>();
+                  Collection<Pair<Mutation, byte[]>> remoteUpdates =
+                          new ArrayList<Pair<Mutation, byte[]>>();
+        		  for (Pair<Mutation, byte[]> mutation : indexUpdates) {
+        			  if (Bytes.toString(mutation.getSecond()).equals(
+        					  environment.getRegion().getTableDesc().getNameAsString())) {
+        				  localUpdates.add(mutation);
+        			  } else {
+                          remoteUpdates.add(mutation);
+        			  }
+        		  }
+                  if(!remoteUpdates.isEmpty()) {
+                      writer.writeAndKillYourselfOnFailure(remoteUpdates, false);
+                  }
+                  if(!localUpdates.isEmpty()) {
+                      writer.writeAndKillYourselfOnFailure(localUpdates, true);
+                  }
               } finally {                  // With a custom kill policy, we may throw instead of kill the server.
                   // Without doing this in a finally block (at least with the mini cluster),
                   // the region server never goes down.


[39/46] phoenix git commit: augment transaction table to the new hbase functions

Posted by td...@apache.org.
Augment the transaction table to support the new HBase functions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/46e4b1a2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/46e4b1a2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/46e4b1a2

Branch: refs/heads/omid
Commit: 46e4b1a2ab374d47bddf7566e1dc7e0a17d54118
Parents: f6f8dda
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon May 8 15:33:26 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon May 8 15:33:26 2017 +0300

----------------------------------------------------------------------
 .../transaction/OmidTransactionTable.java       | 24 ++++++++++++++++++++
 .../transaction/TephraTransactionTable.java     | 19 ++++++++++++++++
 2 files changed, 43 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/46e4b1a2/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index d2cd020..d2fd4c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -320,4 +320,28 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
         return false;
     }
 
+    @Override
+    public int getOperationTimeout() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public int getRpcTimeout() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
+
+    @Override
+    public void setOperationTimeout(int arg0) {
+        // TODO Auto-generated method stub
+        
+    }
+
+    @Override
+    public void setRpcTimeout(int arg0) {
+        // TODO Auto-generated method stub
+        
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/46e4b1a2/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index 49753f0..8ee784e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -311,4 +311,23 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
         return transactionAwareHTable.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
     }
 
+    @Override
+    public void setOperationTimeout(int i) {
+//        transactionAwareHTable.setOperationTimeout(i);
+    }
+
+    @Override
+    public int getOperationTimeout() {
+        return 0; //transactionAwareHTable.getOperationTimeout();
+    }
+
+    @Override
+    public void setRpcTimeout(int i) {
+//        transactionAwareHTable.setRpcTimeout(i);
+    }
+
+    @Override
+    public int getRpcTimeout() {
+        return 0; //transactionAwareHTable.getRpcTimeout();
+    }
 }


[33/46] phoenix git commit: PHOENIX-3603 Fix compilation errors against hbase 1.3

Posted by td...@apache.org.
PHOENIX-3603 Fix compilation errors against hbase 1.3


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5b099014
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5b099014
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5b099014

Branch: refs/heads/omid
Commit: 5b099014446865c12779f3882fd8b407496717ea
Parents: 92b951e
Author: Zach York <zy...@amazon.com>
Authored: Wed Jan 25 20:42:08 2017 -0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Tue Apr 25 15:14:43 2017 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |  2 +-
 phoenix-client/pom.xml                          |  2 +-
 phoenix-core/pom.xml                            |  2 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  2 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 10 +++++++++
 .../apache/phoenix/execute/DelegateHTable.java  | 22 +++++++++++++++++++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  2 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |  2 +-
 phoenix-flume/pom.xml                           |  2 +-
 phoenix-hive/pom.xml                            |  2 +-
 phoenix-kafka/pom.xml                           |  2 +-
 phoenix-pherf/pom.xml                           |  2 +-
 phoenix-pig/pom.xml                             |  2 +-
 phoenix-queryserver-client/pom.xml              |  2 +-
 phoenix-queryserver/pom.xml                     |  2 +-
 phoenix-server/pom.xml                          |  2 +-
 phoenix-spark/pom.xml                           |  2 +-
 phoenix-tracing-webapp/pom.xml                  |  2 +-
 pom.xml                                         |  4 ++--
 19 files changed, 49 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index a6f6f64..829ce7b 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index fb0520c..f436345 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 9d6e0f4..58162b6 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 0b48a1a..a55fe7e 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -264,7 +264,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
    * @throws IOException
    */
   private WAL createWAL(final Configuration c, WALFactory walFactory) throws IOException {
-    WAL wal = walFactory.getWAL(new byte[]{});
+    WAL wal = walFactory.getWAL(new byte[]{}, null);
 
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 4fdddf5..7712cc6 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -115,6 +115,16 @@ public class PhoenixRpcScheduler extends RpcScheduler {
         return this.delegate.getActiveRpcHandlerCount() + this.indexCallExecutor.getActiveHandlerCount() + this.metadataCallExecutor.getActiveHandlerCount();
     }
 
+    @Override
+    public long getNumGeneralCallsDropped() {
+        return delegate.getNumGeneralCallsDropped();
+    }
+
+    @Override
+    public long getNumLifoModeSwitches() {
+        return delegate.getNumLifoModeSwitches();
+    }
+
     @VisibleForTesting
     public void setIndexExecutorForTesting(RpcExecutor executor) {
         this.indexCallExecutor = executor;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
index 6b3f9ca..f45b356 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
@@ -279,7 +279,27 @@ public class DelegateHTable implements HTableInterface {
         return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
     }
 
-	@Override
+    @Override
+    public void setOperationTimeout(int i) {
+        delegate.setOperationTimeout(i);
+    }
+
+    @Override
+    public int getOperationTimeout() {
+        return delegate.getOperationTimeout();
+    }
+
+    @Override
+    public void setRpcTimeout(int i) {
+        delegate.setRpcTimeout(i);
+    }
+
+    @Override
+    public int getRpcTimeout() {
+        return delegate.getRpcTimeout();
+    }
+
+    @Override
 	public boolean[] existsAll(List<Get> gets) throws IOException {
 		return delegate.existsAll(gets);
 	}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index 289b490..3a2780d 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -93,7 +93,7 @@ public class PhoenixIndexRpcSchedulerTest {
         RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build();
         RpcServer server = new RpcServer(null, "test-rpcserver", null, isa, conf, scheduler);
         RpcServer.Call call =
-                server.new Call(0, null, null, header, null, null, connection, null, 10, null, null);
+                server.new Call(0, null, null, header, null, null, connection, null, 10, null, null, 0);
         Mockito.when(task.getCall()).thenReturn(call);
 
         scheduler.dispatch(task);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index 35b607e..819c7f6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -81,7 +81,7 @@ public class TestPerRegionIndexWriteCache {
       Random rn = new Random();
       tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
       WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
-      wal = walFactory.getWAL(Bytes.toBytes("logs"));
+      wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
       HTableDescriptor htd = new HTableDescriptor(tableName);
       HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
       htd.addFamily(a);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 809f7fd..b579b23 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 72b3c39..03ce99c 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 5559e5e..36ea91d 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.11.0-HBase-1.2-SNAPSHOT</version>
+		<version>4.11.0-HBase-1.3-SNAPSHOT</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index ecf8251..9fa9942 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.11.0-HBase-1.2-SNAPSHOT</version>
+		<version>4.11.0-HBase-1.3-SNAPSHOT</version>
 	</parent>
 
 	<artifactId>phoenix-pherf</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index e191397..def9d1e 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-queryserver-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 432f854..60fc140 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index dc2a1ab..2ce45fc 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 6204e99..2d9103d 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 612300f..2eac5ad 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -28,7 +28,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+    <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-spark</artifactId>
   <name>Phoenix - Spark</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-tracing-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml
index 35f70ad..08a4ffd 100755
--- a/phoenix-tracing-webapp/pom.xml
+++ b/phoenix-tracing-webapp/pom.xml
@@ -27,7 +27,7 @@
     <parent>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix</artifactId>
-      <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+      <version>4.11.0-HBase-1.3-SNAPSHOT</version>
     </parent>
 
     <artifactId>phoenix-tracing-webapp</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 248a244..1674358 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.phoenix</groupId>
   <artifactId>phoenix</artifactId>
-  <version>4.11.0-HBase-1.2-SNAPSHOT</version>
+  <version>4.11.0-HBase-1.3-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Phoenix</name>
   <description>A SQL layer over HBase</description>
@@ -65,7 +65,7 @@
     <top.dir>${project.basedir}</top.dir>
 
     <!-- Hadoop Versions -->
-    <hbase.version>1.2.3</hbase.version>
+    <hbase.version>1.3.1</hbase.version>
     <hadoop-two.version>2.7.1</hadoop-two.version>
 
     <!-- Dependency versions -->


[02/46] phoenix git commit: Add the context to the table constructor and change DML/DDL function names

Posted by td...@apache.org.
Add the context to the table constructor and change DML/DDL function names


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1d32eb29
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1d32eb29
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1d32eb29

Branch: refs/heads/omid
Commit: 1d32eb29fa216295976c7f17a329601768c79171
Parents: 7c2ca8c
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Thu Feb 9 16:02:05 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Thu Feb 9 16:02:05 2017 +0200

----------------------------------------------------------------------
 .../transaction/OmidTransactionContext.java      | 16 ++--------------
 .../transaction/OmidTransactionTable.java        |  4 ++++
 .../transaction/PhoenixTransactionContext.java   | 19 ++-----------------
 .../transaction/TephraTransactionContext.java    | 18 +++---------------
 .../transaction/TephraTransactionTable.java      |  4 ++++
 5 files changed, 15 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d32eb29/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 5f0bdce..bc5b05b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -38,14 +38,14 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void commitDDL(PTable dataTable) throws SQLException,
+    public void commitDDLFence(PTable dataTable) throws SQLException,
             InterruptedException, TimeoutException {
         // TODO Auto-generated method stub
 
     }
 
     @Override
-    public void markDML(PTable table) {
+    public void markDMLFence(PTable table) {
         // TODO Auto-generated method stub
 
     }
@@ -57,18 +57,6 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void addTransactionTable(PhoenixTransactionalTable table) {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public void addTransactionToTable(PhoenixTransactionalTable table) {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
     public boolean isTransactionRunning() {
         // TODO Auto-generated method stub
         return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d32eb29/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index f5cdd17..5a5291c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -15,6 +15,10 @@ import org.apache.hadoop.hbase.client.Scan;
 
 public class OmidTransactionTable implements PhoenixTransactionalTable {
 
+    public OmidTransactionTable(PhoenixTransactionContext ctx) {
+        // TODO Auto-generated constructor stub
+    }
+    
     @Override
     public Result get(Get get) throws IOException {
         // TODO Auto-generated method stub

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d32eb29/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index b391144..3ac0ae3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -51,7 +51,7 @@ public interface PhoenixTransactionContext {
      * @throws InterruptedException
      * @throws TimeoutException
      */
-    public void commitDDL(PTable dataTable)
+    public void commitDDLFence(PTable dataTable)
             throws SQLException, InterruptedException, TimeoutException;
     
     /**
@@ -60,7 +60,7 @@ public interface PhoenixTransactionContext {
      * 
      * @param table  the table that the DML command works on
      */
-    public void markDML(PTable table);
+    public void markDMLFence(PTable table);
     
     /**
      * Augment the current context with ctx modified keys
@@ -69,21 +69,6 @@ public interface PhoenixTransactionContext {
      */
     public void join(PhoenixTransactionContext ctx);
     
-    /** 
-     * Add transaction table to the context.
-     * Will be mostly used by Tephra, since Omid keeps the changeset inside the transaction while 
-     * Tephra keeps it distributed at the different awares.
-     * 
-     * @param table
-     */
-    public void addTransactionTable(PhoenixTransactionalTable table);
-    
-    /**
-     * Add transaction to the table. 
-     * @param table
-     */
-    public void addTransactionToTable(PhoenixTransactionalTable table);
-    
     /**
      * Is there a transaction in flight?
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d32eb29/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index cfe8e9b..17c70f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -38,14 +38,14 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     }
 
     @Override
-    public void commitDDL(PTable dataTable) throws SQLException,
+    public void commitDDLFence(PTable dataTable) throws SQLException,
             InterruptedException, TimeoutException {
         // TODO Auto-generated method stub
 
     }
 
     @Override
-    public void markDML(PTable table) {
+    public void markDMLFence(PTable table) {
         // TODO Auto-generated method stub
 
     }
@@ -56,19 +56,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     }
 
-    @Override
-    public void addTransactionTable(PhoenixTransactionalTable table) {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public void addTransactionToTable(PhoenixTransactionalTable table) {
-        // TODO Auto-generated method stub
-
-    }
-
-    @Override
+       @Override
     public boolean isTransactionRunning() {
         // TODO Auto-generated method stub
         return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d32eb29/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index 09d37c3..0d788c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -15,6 +15,10 @@ import org.apache.hadoop.hbase.client.Scan;
 
 public class TephraTransactionTable implements PhoenixTransactionalTable {
 
+    public TephraTransactionTable(PhoenixTransactionContext ctx) {
+        // TODO Auto-generated constructor stub
+    }
+
     @Override
     public Result get(Get get) throws IOException {
         // TODO Auto-generated method stub


[05/46] phoenix git commit: Eddie comment regarding ternary operation

Posted by td...@apache.org.
Eddie comment regarding ternary operation


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bcffce98
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bcffce98
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bcffce98

Branch: refs/heads/omid
Commit: bcffce98b9f605f5b455376798aea0e979630837
Parents: cea251c
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Sun Feb 19 09:12:25 2017 +0200
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Sun Feb 19 09:12:25 2017 +0200

----------------------------------------------------------------------
 .../org/apache/phoenix/transaction/TephraTransactionContext.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bcffce98/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 81c9fd1..9c7337d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -34,7 +34,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
 
     public TephraTransactionContext(PhoenixTransactionContext ctx, PhoenixConnection connection, boolean threadSafe) {
 
-        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient(); // TODO Should be wrapped for Omid side usage
+        this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
 
         assert(ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraTransactionContext = (TephraTransactionContext) ctx;
@@ -191,7 +191,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
     @Override
     public boolean isTransactionRunning() {
         if (this.txContext != null) {
-            return (this.txContext.getCurrentTransaction() != null) ? true : false;
+            return (this.txContext.getCurrentTransaction() != null);
         }
 
         if (this.tx != null) {


[14/46] phoenix git commit: fix bugs

Posted by td...@apache.org.
fix bugs


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f584e5f1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f584e5f1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f584e5f1

Branch: refs/heads/omid
Commit: f584e5f1a53cfaecb309ecf3201011a1579ebf47
Parents: fa69563
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Mon Mar 27 08:58:15 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Mon Mar 27 08:58:15 2017 +0300

----------------------------------------------------------------------
 .../apache/phoenix/execute/PartialCommitIT.java |   4 +-
 .../phoenix/tx/FlappingTransactionIT.java       |  27 ++--
 .../phoenix/cache/IndexMetaDataCache.java       |   7 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |   5 +-
 .../apache/phoenix/execute/MutationState.java   |  51 +++-----
 .../index/IndexMetaDataCacheFactory.java        |  15 ++-
 .../phoenix/index/PhoenixIndexMetaData.java     |  14 +--
 .../index/PhoenixTransactionalIndexer.java      |  35 +++---
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   3 +-
 .../transaction/OmidTransactionContext.java     |  18 +++
 .../transaction/PhoenixTransactionContext.java  |  29 ++++-
 .../transaction/TephraTransactionContext.java   |  87 ++++++++++++-
 .../phoenix/transaction/TransactionFactory.java | 126 +++++++++++++++++++
 .../apache/phoenix/util/TransactionUtil.java    |   8 +-
 15 files changed, 325 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index a5555f3..636cd84 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -271,11 +271,11 @@ public class PartialCommitIT extends BaseOwnClusterIT {
         return new PhoenixConnection(phxCon, null) {
             @Override
             protected MutationState newMutationState(int maxSize) {
-                return new MutationState(maxSize, this, mutations, null, null);
+                return new MutationState(maxSize, this, mutations, false, null);
             };
         };
     }
-    
+
     public static class FailingRegionObserver extends SimpleRegionObserver {
         @Override
         public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
index 5a990cf..d34f403 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
@@ -42,6 +42,9 @@ import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.PhoenixTransactionalTable;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TransactionContext;
@@ -228,15 +231,17 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         }
 
         // Use HBase level Tephra APIs to start a new transaction
-        TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
-        TransactionContext txContext = new TransactionContext(txServiceClient, txAware);
-        txContext.start();
-        
+        //TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
+        PhoenixTransactionContext txContext = TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
+        PhoenixTransactionalTable txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
+
+        txContext.begin();
+
         // Use HBase APIs to add a new row
         Put put = new Put(Bytes.toBytes("z"));
         put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
         put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
-        txAware.put(put);
+        txTable.put(put);
         
         // Use Phoenix APIs to add new row (sharing the transaction context)
         pconn.setTransactionContext(txContext);
@@ -259,7 +264,7 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         assertEquals(3,rs.getInt(1));
         
         // Use Tephra APIs directly to finish (i.e. commit) the transaction
-        txContext.finish();
+        txContext.commit();
         
         // Confirm that attempt to commit row with conflict fails
         try {
@@ -279,14 +284,16 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         }
         
         // Repeat the same as above, but this time abort the transaction
-        txContext = new TransactionContext(txServiceClient, txAware);
-        txContext.start();
+        txContext = TransactionFactory.getTransactionFactory().getTransactionContext(pconn);
+        txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txContext, htable);
+
+        txContext.begin();
         
         // Use HBase APIs to add a new row
         put = new Put(Bytes.toBytes("j"));
         put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
         put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
-        txAware.put(put);
+        txTable.put(put);
         
         // Use Phoenix APIs to add new row (sharing the transaction context)
         pconn.setTransactionContext(txContext);
@@ -325,4 +332,4 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
         assertTrue(result.isEmpty());
     }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
index d22993c..16207c8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java
@@ -23,9 +23,8 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.tephra.Transaction;
-
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 
 public interface IndexMetaDataCache extends Closeable {
     public static final IndexMetaDataCache EMPTY_INDEX_META_DATA_CACHE = new IndexMetaDataCache() {
@@ -40,11 +39,11 @@ public interface IndexMetaDataCache extends Closeable {
         }
 
         @Override
-        public Transaction getTransaction() {
+        public PhoenixTransactionContext getTransactionContext() {
             return null;
         }
         
     };
     public List<IndexMaintainer> getIndexMaintainers();
-    public Transaction getTransaction();
+    public PhoenixTransactionContext getTransactionContext();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index f6bd512..321d117 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -315,7 +315,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final byte[][] viewConstants, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
-                dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr);
+                dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr);
     }
 
     /**
@@ -330,7 +330,6 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
      * @param tupleProjector
      * @param dataRegion
      * @param indexMaintainer
-     * @param tx current transaction
      * @param viewConstants
      */
     protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
@@ -338,7 +337,6 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final Expression[] arrayFuncRefs, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
             final Region dataRegion, final IndexMaintainer indexMaintainer,
-            Transaction tx, 
             final byte[][] viewConstants, final KeyValueSchema kvSchema,
             final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index ade88db..0e0e3ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -204,7 +204,6 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         Region dataRegion = null;
         IndexMaintainer indexMaintainer = null;
         byte[][] viewConstants = null;
-        Transaction tx = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
         if (dataColumns != null) {
             tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
@@ -213,15 +212,13 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
             indexMaintainer = indexMaintainers.get(0);
             viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
-            byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
-            tx = MutationState.decodeTransaction(txState);
         }
 
         final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
         final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
         innerScanner =
                 getWrappedScanner(c, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan,
-                    dataColumns, tupleProjector, dataRegion, indexMaintainer, tx,
+                    dataColumns, tupleProjector, dataRegion, indexMaintainer,
                     viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr);
 
         final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index c480e30..23c8b2a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -87,7 +87,7 @@ import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
 import org.apache.phoenix.transaction.PhoenixTransactionalTable;
-import org.apache.phoenix.transaction.TephraTransactionContext;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -97,17 +97,6 @@ import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TransactionUtil;
-import org.apache.tephra.Transaction;
-import org.apache.tephra.Transaction.VisibilityLevel;
-import org.apache.tephra.TransactionAware;
-import org.apache.tephra.TransactionCodec;
-import org.apache.tephra.TransactionConflictException;
-import org.apache.tephra.TransactionContext;
-import org.apache.tephra.TransactionFailureException;
-import org.apache.tephra.TransactionSystemClient;
-import org.apache.tephra.hbase.TransactionAwareHTable;
-import org.apache.tephra.visibility.FenceWait;
-import org.apache.tephra.visibility.VisibilityFence;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -124,7 +113,6 @@ import com.google.common.collect.Sets;
  */
 public class MutationState implements SQLCloseable {
     private static final Logger logger = LoggerFactory.getLogger(MutationState.class);
-    private static final TransactionCodec CODEC = new TransactionCodec();
     private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0];
     private static final int MAX_COMMIT_RETRIES = 3;
 
@@ -183,20 +171,20 @@ public class MutationState implements SQLCloseable {
                 : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE;
         if (subTask == false) {
             if (txContext == null) {
-                phoenixTransactionContext = new TephraTransactionContext(connection);
+                phoenixTransactionContext = TransactionFactory.getTransactionFactory().getTransactionContext(connection);
             } else {
                 isExternalTxContext = true;
-                phoenixTransactionContext = new TephraTransactionContext(txContext, connection, subTask);
+                phoenixTransactionContext = TransactionFactory.getTransactionFactory().getTransactionContext(txContext, connection, subTask);
             }
         } else {
             // this code path is only used while running child scans, we can't pass the txContext to child scans
             // as it is not thread safe, so we use the tx member variable
-            phoenixTransactionContext = new TephraTransactionContext(txContext, connection, subTask);
+            phoenixTransactionContext = TransactionFactory.getTransactionFactory().getTransactionContext(txContext, connection, subTask);
         }
     }
 
     public MutationState(TableRef table, Map<ImmutableBytesPtr,RowMutationState> mutations, long sizeOffset, long maxSize, PhoenixConnection connection) {
-        this(maxSize, connection, true, connection.getMutationState().getPhoenixTransactionContext(), sizeOffset);
+        this(maxSize, connection, false, null, sizeOffset);
         this.mutations.put(table, mutations);
         this.numRows = mutations.size();
         throwIfTooBig();
@@ -220,7 +208,7 @@ public class MutationState implements SQLCloseable {
      * @param dataTable the data table upon which an index is being added
      * @throws SQLException
      */
-    public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
+    public void commitDDLFence(PTable dataTable) throws SQLException {
         if (dataTable.isTransactional()) {
             try {
                 phoenixTransactionContext.commitDDLFence(dataTable, logger);
@@ -305,13 +293,11 @@ public class MutationState implements SQLCloseable {
     // would not change as these threads are running.
     public HTableInterface getHTable(PTable table) throws SQLException {
         HTableInterface htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
-        Transaction currentTx;
-        if (table.isTransactional() && (currentTx=getTransaction()) != null) {
-            TransactionAwareHTable txAware = TransactionUtil.getTransactionAwareHTable(htable, table.isImmutableRows());
+        if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) {
+            PhoenixTransactionalTable phoenixTransactionTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, htable, table.isImmutableRows());
             // Using cloned mutationState as we may have started a new transaction already
             // if auto commit is true and we need to use the original one here.
-            txAware.startTx(currentTx);
-            htable = txAware;
+            htable = phoenixTransactionTable;
         }
         return htable;
     }
@@ -440,7 +426,7 @@ public class MutationState implements SQLCloseable {
             return;
         }
 
-        phoenixTransactionContext.join(getPhoenixTransactionContext());
+        phoenixTransactionContext.join(newMutationState.getPhoenixTransactionContext());
 
         this.sizeOffset += newMutationState.sizeOffset;
         joinMutationState(newMutationState.mutations, this.mutations);
@@ -1090,17 +1076,9 @@ public class MutationState implements SQLCloseable {
     }
 
     public byte[] encodeTransaction() throws SQLException {
-        try {
-            return CODEC.encode(getTransaction());
-        } catch (IOException e) {
-            throw new SQLException(e);
-        }
+        return phoenixTransactionContext.encodeTransaction();
     }
     
-    public static Transaction decodeTransaction(byte[] txnBytes) throws IOException {
-        return (txnBytes == null || txnBytes.length==0) ? null : CODEC.decode(txnBytes);
-    }
-
     private ServerCache setMetaDataOnMutations(TableRef tableRef, List<? extends Mutation> mutations,
             ImmutableBytesWritable indexMetaDataPtr) throws SQLException {
         PTable table = tableRef.getTable();
@@ -1333,12 +1311,13 @@ public class MutationState implements SQLCloseable {
      * @throws SQLException
      */
     public boolean sendUncommitted(Iterator<TableRef> tableRefs) throws SQLException {
-        Transaction currentTx = getTransaction();
-        if (currentTx != null) {
+
+        if (phoenixTransactionContext.isTransactionRunning()) {
             // Initialize visibility so that transactions see their own writes.
             // The checkpoint() method will set it to not see writes if necessary.
-            currentTx.setVisibility(VisibilityLevel.SNAPSHOT);
+            phoenixTransactionContext.setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT);
         }
+
         Iterator<TableRef> filteredTableRefs = Iterators.filter(tableRefs, new Predicate<TableRef>(){
             @Override
             public boolean apply(TableRef tableRef) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
index 56849fe..8658524 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java
@@ -24,15 +24,13 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.List;
 
-import org.apache.tephra.Transaction;
-
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.IndexMetaDataCache;
 import org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory;
-import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
-import org.apache.phoenix.util.TransactionUtil;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.TransactionFactory;
 
 public class IndexMetaDataCacheFactory implements ServerCacheFactory {
     public IndexMetaDataCacheFactory() {
@@ -49,11 +47,12 @@ public class IndexMetaDataCacheFactory implements ServerCacheFactory {
     @Override
     public Closeable newCache (ImmutableBytesWritable cachePtr, byte[] txState, final MemoryChunk chunk) throws SQLException {
         // just use the standard keyvalue builder - this doesn't really need to be fast
+        
         final List<IndexMaintainer> maintainers = 
                 IndexMaintainer.deserialize(cachePtr, GenericKeyValueBuilder.INSTANCE);
-        final Transaction txn;
+        final PhoenixTransactionContext txnContext;
         try {
-            txn = txState.length!=0 ? MutationState.decodeTransaction(txState) : null;
+            txnContext = txState.length != 0 ? TransactionFactory.getTransactionFactory().getTransactionContext(txState) : null;
         } catch (IOException e) {
             throw new SQLException(e);
         }
@@ -70,8 +69,8 @@ public class IndexMetaDataCacheFactory implements ServerCacheFactory {
             }
 
             @Override
-            public Transaction getTransaction() {
-                return txn;
+            public PhoenixTransactionContext getTransactionContext() {
+                return txnContext;
             }
         };
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index d22e957..82fe2f3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@ -30,12 +30,12 @@ import org.apache.phoenix.cache.TenantCache;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.tephra.Transaction;
 
 public class PhoenixIndexMetaData implements IndexMetaData {
     private final Map<String, byte[]> attributes;
@@ -51,7 +51,7 @@ public class PhoenixIndexMetaData implements IndexMetaData {
         byte[] txState = attributes.get(BaseScannerRegionObserver.TX_STATE);
         if (md != null) {
             final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md);
-            final Transaction txn = MutationState.decodeTransaction(txState);
+            final PhoenixTransactionContext txnContext = TransactionFactory.getTransactionFactory().getTransactionContext(txState);
             return new IndexMetaDataCache() {
 
                 @Override
@@ -63,8 +63,8 @@ public class PhoenixIndexMetaData implements IndexMetaData {
                 }
 
                 @Override
-                public Transaction getTransaction() {
-                    return txn;
+                public PhoenixTransactionContext getTransactionContext() {
+                    return txnContext;
                 }
 
             };
@@ -96,8 +96,8 @@ public class PhoenixIndexMetaData implements IndexMetaData {
         this.ignoreNewerMutations = attributes.get(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS) != null;
     }
     
-    public Transaction getTransaction() {
-        return indexMetaDataCache.getTransaction();
+    public PhoenixTransactionContext getTransactionContext() {
+        return indexMetaDataCache.getTransactionContext();
     }
     
     public List<IndexMaintainer> getIndexMaintainers() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index fdf5498..a418c24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -69,14 +69,14 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.trace.TracingUtils;
 import org.apache.phoenix.trace.util.NullSpan;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
+import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
+import org.apache.phoenix.transaction.PhoenixTransactionalTable;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TransactionUtil;
-import org.apache.tephra.Transaction;
-import org.apache.tephra.Transaction.VisibilityLevel;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -149,7 +149,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
 
         Map<String,byte[]> updateAttributes = m.getAttributesMap();
         PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaData(c.getEnvironment(),updateAttributes);
-        byte[] txRollbackAttribute = m.getAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY);
+        byte[] txRollbackAttribute = m.getAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY);
         Collection<Pair<Mutation, byte[]>> indexUpdates = null;
         // get the current span, or just use a null-span to avoid a bunch of if statements
         try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
@@ -186,14 +186,14 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
     }
     
     private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
-        Transaction tx = indexMetaData.getTransaction();
-        if (tx == null) {
+        PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext();
+        if (txnContext == null) {
             throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
         }
         boolean isRollback = txRollbackAttribute!=null;
         boolean isImmutable = indexMetaData.isImmutableRows();
         ResultScanner currentScanner = null;
-        TransactionAwareHTable txTable = null;
+        PhoenixTransactionalTable txTable = null;
         // Collect up all mutations in batch
         Map<ImmutableBytesPtr, MultiMutation> mutations =
                 new HashMap<ImmutableBytesPtr, MultiMutation>();
@@ -250,23 +250,22 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
                 scanRanges.initializeScan(scan);
                 TableName tableName = env.getRegion().getRegionInfo().getTable();
                 HTableInterface htable = env.getTable(tableName);
-                txTable = new TransactionAwareHTable(htable);
-                txTable.startTx(tx);
+                txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txnContext, htable);
                 // For rollback, we need to see all versions, including
                 // the last committed version as there may be multiple
                 // checkpointed versions.
                 SkipScanFilter filter = scanRanges.getSkipScanFilter();
                 if (isRollback) {
                     filter = new SkipScanFilter(filter,true);
-                    tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
+                    txnContext.setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL);
                 }
                 scan.setFilter(filter);
                 currentScanner = txTable.getScanner(scan);
             }
             if (isRollback) {
-                processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
+                processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, txnContext, mutableColumns, indexUpdates, mutations);
             } else {
-                processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
+                processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, txnContext, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
             }
         } finally {
             if (txTable != null) txTable.close();
@@ -289,7 +288,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
     private void processMutation(RegionCoprocessorEnvironment env,
             PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute,
             ResultScanner scanner,
-            Transaction tx, 
+            PhoenixTransactionContext txnContext, 
             Set<ColumnReference> upsertColumns, 
             Collection<Pair<Mutation, byte[]>> indexUpdates,
             Map<ImmutableBytesPtr, MultiMutation> mutations,
@@ -300,14 +299,14 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
             // Process existing data table rows by removing the old index row and adding the new index row
             while ((result = scanner.next()) != null) {
                 Mutation m = mutationsToFindPreviousValue.remove(new ImmutableBytesPtr(result.getRow()));
-                TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(), tx.getWritePointer(), m, emptyColRef, result);
+                TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(), txnContext.getWritePointer(), m, emptyColRef, result);
                 generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                 generatePuts(indexMetaData, indexUpdates, state);
             }
         }
         // Process new data table by adding new index rows
         for (Mutation m : mutations.values()) {
-            TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(), tx.getWritePointer(), m);
+            TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(), txnContext.getWritePointer(), m);
             generatePuts(indexMetaData, indexUpdates, state);
         }
     }
@@ -315,7 +314,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
     private void processRollback(RegionCoprocessorEnvironment env,
             PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute,
             ResultScanner scanner,
-            Transaction tx, Set<ColumnReference> mutableColumns,
+            PhoenixTransactionContext tx, Set<ColumnReference> mutableColumns,
             Collection<Pair<Mutation, byte[]>> indexUpdates,
             Map<ImmutableBytesPtr, MultiMutation> mutations) throws IOException {
         if (scanner != null) {
@@ -402,7 +401,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
         Iterable<IndexUpdate> deletes = codec.getIndexDeletes(state, indexMetaData);
         for (IndexUpdate delete : deletes) {
             if (delete.isValid()) {
-                delete.getUpdate().setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, attribValue);
+                delete.getUpdate().setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, attribValue);
                 indexUpdates.add(new Pair<Mutation, byte[]>(delete.getUpdate(),delete.getTableName()));
             }
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index cb2390e..d387ab7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -103,6 +103,7 @@ import org.apache.phoenix.schema.types.PUnsignedDate;
 import org.apache.phoenix.schema.types.PUnsignedTime;
 import org.apache.phoenix.schema.types.PUnsignedTimestamp;
 import org.apache.phoenix.trace.util.Tracing;
+import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.NumberUtil;
@@ -629,7 +630,7 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
         mutationState.sendUncommitted();
     }
         
-    public void setTransactionContext(TransactionContext txContext) throws SQLException {
+    public void setTransactionContext(PhoenixTransactionContext txContext) throws SQLException {
         if (!this.services.getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT)
             .build().buildException();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 596cf73..8a4e284 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -85,4 +85,22 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
         // TODO Auto-generated method stub
         return null;
     }
+
+    @Override
+    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
+        // TODO Auto-generated method stub
+        
+    }
+
+    @Override
+    public byte[] encodeTransaction() throws SQLException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public long getMaxTransactionsPerSecond() {
+        // TODO Auto-generated method stub
+        return 0;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 2d0d5b7..bd63930 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -1,7 +1,6 @@
 package org.apache.phoenix.transaction;
 
 import org.apache.phoenix.schema.PTable;
-import org.apache.tephra.Transaction.VisibilityLevel;
 import org.slf4j.Logger;
 
 import java.sql.SQLException;
@@ -20,6 +19,8 @@ public interface PhoenixTransactionContext {
         SNAPSHOT_ALL
       }
 
+    public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "phoenix.tx.rollback"; 
+
     /**
      * Starts a transaction
      *
@@ -87,20 +88,36 @@ public interface PhoenixTransactionContext {
     /**
      * Returns transaction unique identifier
      */
-    long getTransactionId();
+    public long getTransactionId();
 
     /**
      * Returns transaction snapshot id
      */
-    long getReadPointer();
+    public long getReadPointer();
 
     /**
      * Returns transaction write pointer. After checkpoint the write pointer is different than the initial one  
      */
-    long getWritePointer();
+    public long getWritePointer();
+
+    /**
+     * Set visibility level
+     */
+    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel);
 
     /**
-     * Returns visibility level 
+     * Returns visibility level
+     */
+    public PhoenixVisibilityLevel getVisibilityLevel();
+
+    /**
+     * Encode transaction
+     */
+    public byte[] encodeTransaction() throws SQLException;
+
+    /**
+     * 
+     * @return max transactions per second
      */
-    PhoenixVisibilityLevel getVisibilityLevel();    
+    public long getMaxTransactionsPerSecond();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index f8096d5..cfa3ac3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -1,5 +1,6 @@
 package org.apache.phoenix.transaction;
 
+import java.io.IOException;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
@@ -11,14 +12,20 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
 import org.apache.tephra.Transaction;
 import org.apache.tephra.TransactionAware;
+import org.apache.tephra.TransactionCodec;
 import org.apache.tephra.TransactionConflictException;
 import org.apache.tephra.TransactionContext;
 import org.apache.tephra.TransactionFailureException;
+import org.apache.tephra.TransactionManager;
 import org.apache.tephra.TransactionSystemClient;
 import org.apache.tephra.Transaction.VisibilityLevel;
+import org.apache.tephra.TxConstants;
+import org.apache.tephra.inmemory.InMemoryTxSystemClient;
 import org.apache.tephra.visibility.FenceWait;
 import org.apache.tephra.visibility.VisibilityFence;
 
@@ -28,12 +35,24 @@ import org.slf4j.Logger;
 
 public class TephraTransactionContext implements PhoenixTransactionContext {
 
+    private static final TransactionCodec CODEC = new TransactionCodec();
+
     private final List<TransactionAware> txAwares;
     private final TransactionContext txContext;
     private Transaction tx;
     private TransactionSystemClient txServiceClient;
     private TransactionFailureException e;
 
+    public TephraTransactionContext() {
+        this.txServiceClient = null;
+        this.txAwares = Lists.newArrayList();
+        this.txContext = null;
+    }
+
+    public TephraTransactionContext(byte[] txnBytes) throws IOException {
+        this();
+        this.tx = (txnBytes != null && txnBytes.length > 0) ? CODEC.decode(txnBytes) : null;
+    }
 
     public TephraTransactionContext(PhoenixConnection connection) {
         this.txServiceClient = connection.getQueryServices().getTransactionSystemClient();
@@ -65,6 +84,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build().buildException();
         }
 
+        System.out.println("BEGIN");
         try {
             txContext.start();
         } catch (TransactionFailureException e) {
@@ -150,6 +170,14 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         }
     }
 
+    private Transaction getCurrentTransaction() {
+        if (this.txContext != null) {
+            return this.txContext.getCurrentTransaction();
+        }
+
+        return this.tx;
+    }
+
     @Override
     public void commitDDLFence(PTable dataTable, Logger logger) throws SQLException {
         byte[] key = dataTable.getName().getBytes();
@@ -159,7 +187,7 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
             fenceWait.await(10000, TimeUnit.MILLISECONDS);
             
             if (logger.isInfoEnabled()) {
-                logger.info("Added write fence at ~" + getTransaction().getReadPointer());
+                logger.info("Added write fence at ~" + getCurrentTransaction().getReadPointer());
             }
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
@@ -199,8 +227,6 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         assert(ctx instanceof TephraTransactionContext);
         TephraTransactionContext tephraContext = (TephraTransactionContext) ctx;
 
-        tephraContext.getAwares();
-
         if (txContext != null) {
             for (TransactionAware txAware : tephraContext.getAwares()) {
                 txContext.addTransactionAware(txAware);
@@ -269,6 +295,33 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         return HConstants.LATEST_TIMESTAMP;
     }
 
+    @Override
+    public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
+        VisibilityLevel tephraVisibilityLevel = null;
+
+        switch(visibilityLevel) {
+        case SNAPSHOT:
+            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT;
+            break;
+        case SNAPSHOT_EXCLUDE_CURRENT:
+            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
+            break;
+        case SNAPSHOT_ALL:
+            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL;
+            break;
+        default:
+            assert(false);               
+        }
+
+        if (this.txContext != null) {
+            txContext.getCurrentTransaction().setVisibility(tephraVisibilityLevel);
+        } else if (tx != null) {
+            tx.setVisibility(tephraVisibilityLevel);
+        } else {
+            assert(false);
+        }
+    }
+    
     // For testing
     @Override
     public PhoenixVisibilityLevel getVisibilityLevel() {
@@ -297,7 +350,33 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
         return phoenixVisibilityLevel;
     }
 
-   /**
+    @Override
+    public byte[] encodeTransaction() throws SQLException {
+
+        Transaction transaction = null;
+
+        if (this.txContext != null) {
+            transaction = txContext.getCurrentTransaction();
+        } else if (tx != null) {
+            transaction =  tx;
+        }
+
+        assert (transaction != null);
+
+        try {
+            return CODEC.encode(transaction);
+        } catch (IOException e) {
+            throw new SQLException(e);
+        }
+    }
+    
+    @Override
+    public long getMaxTransactionsPerSecond() {
+        return TxConstants.MAX_TX_PER_MS;
+    }
+
+
+    /**
     * TephraTransactionContext specific functions
     */
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
new file mode 100644
index 0000000..ba80d02
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
@@ -0,0 +1,126 @@
+package org.apache.phoenix.transaction;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+
+public class TransactionFactory {
+
+    static private TransactionFactory transactionFactory = null;
+
+    private TransactionProcessor tp = TransactionProcessor.Tephra;
+
+    enum TransactionProcessor {
+        Tephra,
+        Omid
+    }
+
+    private TransactionFactory(TransactionProcessor tp) {
+        this.tp = tp;
+    }
+
+    static public void createTransactionFactory(TransactionProcessor tp) {
+        if (transactionFactory == null) {
+            transactionFactory = new TransactionFactory(tp);
+        }
+    }
+
+    static public TransactionFactory getTransactionFactory() {
+        if (transactionFactory == null) {
+            createTransactionFactory(TransactionProcessor.Tephra);
+        }
+
+        return transactionFactory;
+    }
+
+    public PhoenixTransactionContext getTransactionContext()  {
+
+        PhoenixTransactionContext ctx = null;
+
+        switch(tp) {
+        case Tephra:
+            ctx = new TephraTransactionContext();
+            break;
+        case Omid:
+            ctx = new OmidTransactionContext();
+            break;
+        default:
+            ctx = null;
+        }
+        
+        return ctx;
+    }
+
+    public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException {
+
+        PhoenixTransactionContext ctx = null;
+
+        switch(tp) {
+        case Tephra:
+            ctx = new TephraTransactionContext(txnBytes);
+            break;
+        case Omid:
+//            ctx = new OmidTransactionContext(txnBytes);
+            break;
+        default:
+            ctx = null;
+        }
+        
+        return ctx;
+    }
+    
+    public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) {
+
+        PhoenixTransactionContext ctx = null;
+
+        switch(tp) {
+        case Tephra:
+            ctx = new TephraTransactionContext(connection);
+            break;
+        case Omid:
+//            ctx = new OmidTransactionContext(connection);
+            break;
+        default:
+            ctx = null;
+        }
+        
+        return ctx;
+    }
+
+    public PhoenixTransactionContext getTransactionContext(PhoenixTransactionContext contex, PhoenixConnection connection, boolean subTask) {
+
+        PhoenixTransactionContext ctx = null;
+
+        switch(tp) {
+        case Tephra:
+            ctx = new TephraTransactionContext(contex, connection, subTask);
+            break;
+        case Omid:
+//            ctx = new OmidTransactionContext(contex, connection, subTask);
+            break;
+        default:
+            ctx = null;
+        }
+        
+        return ctx;
+    }
+
+    public PhoenixTransactionalTable getTransactionalTable(PhoenixTransactionContext ctx, HTableInterface htable) {
+
+        PhoenixTransactionalTable table = null;
+
+        switch(tp) {
+        case Tephra:
+            table = new TephraTransactionTable(ctx, htable);
+            break;
+        case Omid:
+//            table = new OmidTransactionContext(contex, connection, subTask);
+            break;
+        default:
+            table = null;
+        }
+        
+        return table;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f584e5f1/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 4fbbe57..94a56b8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -32,10 +32,8 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TephraTransactionTable;
-import org.apache.tephra.TransactionConflictException;
-import org.apache.tephra.TransactionFailureException;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
 
 public class TransactionUtil {
     private TransactionUtil() {
@@ -46,11 +44,11 @@ public class TransactionUtil {
     }
     
     public static long convertToNanoseconds(long serverTimeStamp) {
-        return serverTimeStamp * TxConstants.MAX_TX_PER_MS;
+        return serverTimeStamp * TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
     }
     
     public static long convertToMilliseconds(long serverTimeStamp) {
-        return serverTimeStamp / TxConstants.MAX_TX_PER_MS;
+        return serverTimeStamp / TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
     }
     
     public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, HTableInterface htable, boolean isImmutableRows) {


[21/46] phoenix git commit: Merge remote-tracking branch 'upstream/omid'

Posted by td...@apache.org.
Merge remote-tracking branch 'upstream/omid'


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6f7d42f2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6f7d42f2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6f7d42f2

Branch: refs/heads/omid
Commit: 6f7d42f23580f280aac4874ebe845775cf3a6d8c
Parents: e34431c 6b16183
Author: Ohad Shacham <oh...@yahoo-inc.com>
Authored: Thu Apr 13 12:10:53 2017 +0300
Committer: Ohad Shacham <oh...@yahoo-inc.com>
Committed: Thu Apr 13 12:10:53 2017 +0300

----------------------------------------------------------------------
 .../transaction/OmidTransactionContext.java     |  77 +++++
 .../transaction/OmidTransactionTable.java       | 323 +++++++++++++++++++
 .../transaction/PhoenixTransactionContext.java  |  83 +++++
 .../transaction/PhoenixTransactionalTable.java  | 133 ++++++++
 .../transaction/TephraTransactionContext.java   | 285 ++++++++++++++++
 .../transaction/TephraTransactionTable.java     | 303 +++++++++++++++++
 6 files changed, 1204 insertions(+)
----------------------------------------------------------------------