Posted to commits@phoenix.apache.org by pb...@apache.org on 2018/02/11 20:27:14 UTC

[1/7] phoenix git commit: PHOENIX-4130 Avoid server retries for mutable indexes

Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.11.2 00940b343 -> 06ecae7a0


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index cd23dc5..bc2b625 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -119,6 +119,25 @@ public class ServerUtil {
         }
         return new PhoenixIOException(t);
     }
+
+    /**
+     * Return the first SQLException in the exception chain, otherwise parse it.
+     * When we're receiving an exception locally, there's no need to string parse,
+     * as the SQLException will already be part of the chain.
+     * @param t
+     * @return the SQLException, or null if none found
+     */
+    public static SQLException parseLocalOrRemoteServerException(Throwable t) {
+        while (t.getCause() != null) {
+            if (t instanceof NotServingRegionException) {
+                return parseRemoteException(new StaleRegionBoundaryCacheException());
+            } else if (t instanceof SQLException) {
+                return (SQLException) t;
+            }
+            t = t.getCause();
+        }
+        return parseRemoteException(t);
+    }
     
     public static SQLException parseServerExceptionOrNull(Throwable t) {
         while (t.getCause() != null) {
@@ -196,7 +215,7 @@ public class ServerUtil {
         return parseTimestampFromRemoteException(t);
     }
 
-    private static long parseTimestampFromRemoteException(Throwable t) {
+    public static long parseTimestampFromRemoteException(Throwable t) {
         String message = t.getLocalizedMessage();
         if (message != null) {
             // If the message matches the standard pattern, recover the SQLException and throw it.
@@ -216,7 +235,7 @@ public class ServerUtil {
             msg = "";
         }
         if (t instanceof SQLException) {
-            msg = constructSQLErrorMessage((SQLException) t, msg);
+            msg = t.getMessage() + " " + msg;
         }
         msg += String.format(FORMAT_FOR_TIMESTAMP, timestamp);
         return new DoNotRetryIOException(msg, t);
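
A note on the local/remote split introduced above: when an index write fails on a remote region server, HBase rethrows it to the caller with only the message string intact, so the SQLException must be recovered by string parsing; when the failure happens in the local JVM, the real SQLException is still on the cause chain. A minimal caller sketch (the wrapper class and method name are hypothetical; only parseLocalOrRemoteServerException comes from the diff above):

import java.sql.SQLException;

import org.apache.phoenix.util.ServerUtil;

// Hypothetical caller: normalize whatever an index write threw into a
// SQLException, whether it was raised locally (still on the cause chain)
// or remotely (flattened into the message string).
public class IndexWriteErrorHandler {
    public static SQLException toSqlException(Throwable t) {
        SQLException sqlE = ServerUtil.parseLocalOrRemoteServerException(t);
        if (sqlE == null) {
            // No recognizable Phoenix error code anywhere in the chain.
            sqlE = new SQLException(t);
        }
        return sqlE;
    }
}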

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index b0e3780..918c411 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -105,6 +105,10 @@ public class TestIndexWriter {
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
     Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
+    Region mockRegion = Mockito.mock(Region.class);
+    Mockito.when(e.getRegion()).thenReturn(mockRegion);
+    HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
+    Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
     ExecutorService exec = Executors.newFixedThreadPool(1);
     Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
     FakeTableFactory factory = new FakeTableFactory(tables);
@@ -161,6 +165,10 @@ public class TestIndexWriter {
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
     Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
+    Region mockRegion = Mockito.mock(Region.class);
+    Mockito.when(e.getRegion()).thenReturn(mockRegion);
+    HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
+    Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
     FakeTableFactory factory = new FakeTableFactory(tables);
 
     byte[] tableName = this.testName.getTableName();
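
The same four mocked lines recur across the writer tests in this commit because the index writer now reaches through the coprocessor environment to the region's table descriptor. A condensed sketch of the shared stubbing (the helper class is hypothetical; the calls mirror the diff above):

import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.mockito.Mockito;

// Hypothetical helper consolidating the repeated stubbing: any code path that
// calls env.getRegion().getTableDesc() gets a harmless mock instead of an NPE.
public class MockEnv {
    public static RegionCoprocessorEnvironment newEnv() {
        RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
        Mockito.when(e.getConfiguration()).thenReturn(new Configuration());
        Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
        Region mockRegion = Mockito.mock(Region.class);
        Mockito.when(e.getRegion()).thenReturn(mockRegion);
        HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
        Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
        return e;
    }
}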

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 3e2b47c..bfe1d0d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -30,11 +30,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.hbase.index.StubAbortable;
@@ -93,6 +95,10 @@ public class TestParalleIndexWriter {
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
     Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
+    Region mockRegion = Mockito.mock(Region.class);
+    Mockito.when(e.getRegion()).thenReturn(mockRegion);
+    HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
+    Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
     ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
     Put m = new Put(row);
     m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 32a6661..6f0881b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -30,11 +30,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.hbase.index.StubAbortable;
@@ -88,6 +90,10 @@ public class TestParalleWriterIndexCommitter {
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
     Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
+    Region mockRegion = Mockito.mock(Region.class);
+    Mockito.when(e.getRegion()).thenReturn(mockRegion);
+    HTableDescriptor mockTableDesc = Mockito.mock(HTableDescriptor.class);
+    Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
     Stoppable stop = Mockito.mock(Stoppable.class);
     ExecutorService exec = Executors.newFixedThreadPool(1);
     Map<ImmutableBytesPtr, HTableInterface> tables =


[3/7] phoenix git commit: PHOENIX-4130 Avoid server retries for mutable indexes (Addendum)

Posted by pb...@apache.org.
PHOENIX-4130 Avoid server retries for mutable indexes (Addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/21217292
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/21217292
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/21217292

Branch: refs/heads/4.x-cdh5.11.2
Commit: 21217292bd77086039b196c867cca5f20e44b463
Parents: b539cd6
Author: Vincent Poon <vi...@apache.org>
Authored: Thu Feb 1 00:33:01 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:54:19 2018 +0000

----------------------------------------------------------------------
 .../end2end/index/PartialIndexRebuilderIT.java  |  3 +--
 .../index/exception/IndexWriteException.java    | 21 +++++++++++---------
 .../MultiIndexWriteFailureException.java        | 14 +++++++++++--
 .../SingleIndexWriteFailureException.java       | 15 +++++++++++---
 .../index/PhoenixIndexFailurePolicy.java        | 18 +++++++++++++++++
 5 files changed, 55 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/21217292/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index dd986aa..3961d32 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -1098,8 +1098,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
                     conn.createStatement().execute("DELETE FROM " + fullTableName);
                     fail();
                 } catch (SQLException e) {
-                    // Expected
-                    assertEquals(SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode(), e.getErrorCode());
+                    // expected
                 }
                 assertTrue(TestUtil.checkIndexState(conn, fullIndexName, PIndexState.DISABLE, null));
             } finally {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/21217292/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
index 531baa6..5dc6f60 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
@@ -22,6 +22,8 @@ import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.phoenix.query.QueryServicesOptions;
 
+import com.google.common.base.Objects;
+
 /**
  * Generic {@link Exception} that an index write has failed
  */
@@ -33,7 +35,7 @@ public class IndexWriteException extends HBaseIOException {
      * server side.
      */
     private static final String DISABLE_INDEX_ON_FAILURE_MSG = "disableIndexOnFailure=";
-    private boolean disableIndexOnFailure;
+    private boolean disableIndexOnFailure = QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX;
 
   public IndexWriteException() {
     super();
@@ -49,19 +51,15 @@ public class IndexWriteException extends HBaseIOException {
       super(message, cause);
   }
 
-  public IndexWriteException(String message, Throwable cause, boolean disableIndexOnFailure) {
-    super(prependDisableIndexMsg(message, disableIndexOnFailure), cause);
+  public IndexWriteException(Throwable cause, boolean disableIndexOnFailure) {
+    super(cause);
+    this.disableIndexOnFailure = disableIndexOnFailure;
   }
 
-  public IndexWriteException(String message, boolean disableIndexOnFailure) {
-    super(prependDisableIndexMsg(message, disableIndexOnFailure));
+  public IndexWriteException(boolean disableIndexOnFailure) {
     this.disableIndexOnFailure = disableIndexOnFailure;
   }
 
-  private static String prependDisableIndexMsg(String message, boolean disableIndexOnFailure) {
-    return DISABLE_INDEX_ON_FAILURE_MSG + disableIndexOnFailure + " " + message;
-}
-
 public IndexWriteException(Throwable cause) {
     super(cause);
   }
@@ -81,4 +79,9 @@ public IndexWriteException(Throwable cause) {
     public boolean isDisableIndexOnFailure() {
         return disableIndexOnFailure;
     }
+
+    @Override
+    public String getMessage() {
+        return Objects.firstNonNull(super.getMessage(), "") + " " + DISABLE_INDEX_ON_FAILURE_MSG + disableIndexOnFailure + ",";
+    }
 }
\ No newline at end of file
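
The getMessage() override above is what lets the disableIndexOnFailure flag survive an RPC: only the message string crosses the wire, so the flag is appended as a "disableIndexOnFailure=" marker and parsed back out on the receiving side (see parseDisableIndexOnFailure in this class). A minimal round-trip sketch, assuming the marker format shown above and a conservative default when the marker is missing:

// Sketch only: encode the flag into the message on one side, recover it on
// the other. The fallback to 'true' when no marker is present is an assumption.
public class DisableFlagRoundTrip {
    private static final String MARKER = "disableIndexOnFailure=";

    static String encode(String base, boolean disableIndexOnFailure) {
        return base + " " + MARKER + disableIndexOnFailure + ",";
    }

    static boolean decode(String message) {
        int idx = message.indexOf(MARKER);
        if (idx < 0) {
            return true; // assumed default when no marker is present
        }
        String tail = message.substring(idx + MARKER.length());
        int end = tail.indexOf(',');
        return Boolean.parseBoolean(end < 0 ? tail : tail.substring(0, end));
    }

    public static void main(String[] args) {
        System.out.println(decode(encode("index write failed", false))); // false
    }
}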

http://git-wip-us.apache.org/repos/asf/phoenix/blob/21217292/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
index d593791..a14e8a5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
+import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
 
 /**
@@ -40,7 +41,7 @@ public class MultiIndexWriteFailureException extends IndexWriteException {
    * @param failures the tables to which the index write did not succeed
    */
   public MultiIndexWriteFailureException(List<HTableInterfaceReference> failures, boolean disableIndexOnFailure) {
-    super(FAILURE_MSG + failures, disableIndexOnFailure);
+    super(disableIndexOnFailure);
     this.failures = failures;
   }
 
@@ -50,7 +51,7 @@ public class MultiIndexWriteFailureException extends IndexWriteException {
    * @param message detail message
    */
   public MultiIndexWriteFailureException(String message) {
-      super(message, IndexWriteException.parseDisableIndexOnFailure(message));
+      super(IndexWriteException.parseDisableIndexOnFailure(message));
       Pattern p = Pattern.compile(FAILURE_MSG + "\\[(.*)\\]");
       Matcher m = p.matcher(message);
       if (m.find()) {
@@ -66,4 +67,13 @@ public class MultiIndexWriteFailureException extends IndexWriteException {
   public List<HTableInterfaceReference> getFailedTables() {
     return this.failures;
   }
+
+  public void setFailedTables(List<HTableInterfaceReference> failedTables) {
+      this.failures = failedTables;
+  }
+
+  @Override
+    public String getMessage() {
+        return Objects.firstNonNull(super.getMessage(),"") + " " + FAILURE_MSG + failures;
+    }
 }
\ No newline at end of file
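
The message-parsing constructor above handles the remote case: when the exception arrives as a flattened string, the failed table list is re-extracted with the FAILURE_MSG regex. An illustrative recovery, with the exact FAILURE_MSG wording assumed (it is not quoted in this diff) and the table names hypothetical:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustration of the regex in the constructor above; FAILURE_MSG's exact
// text here is an assumption, not the real constant.
public class FailedTableListParser {
    private static final String FAILURE_MSG = "Failed to write to multiple index tables: ";

    static List<String> parse(String message) {
        Matcher m = Pattern.compile(Pattern.quote(FAILURE_MSG) + "\\[(.*)\\]").matcher(message);
        return m.find() ? Arrays.asList(m.group(1).split(",\\s*"))
                        : Arrays.<String>asList();
    }

    public static void main(String[] args) {
        String msg = "disableIndexOnFailure=true, " + FAILURE_MSG + "[IDX_A, IDX_B]";
        System.out.println(parse(msg)); // [IDX_A, IDX_B]
    }
}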

http://git-wip-us.apache.org/repos/asf/phoenix/blob/21217292/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
index 610a82a..67dabba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
@@ -23,6 +23,8 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.client.Mutation;
 
+import com.google.common.base.Objects;
+
 /**
  * Exception thrown if we cannot successfully write to an index table.
  */
@@ -31,6 +33,7 @@ public class SingleIndexWriteFailureException extends IndexWriteException {
 
   public static final String FAILED_MSG = "Failed to make index update:";
   private String table;
+  private String mutationsMsg;
 
   /**
    * Cannot reach the index, but not sure of the table or the mutations that caused the failure
@@ -49,9 +52,9 @@ public class SingleIndexWriteFailureException extends IndexWriteException {
    */
   public SingleIndexWriteFailureException(String targetTableName, List<Mutation> mutations,
       Exception cause, boolean disableIndexOnFailure) {
-    super(FAILED_MSG + "\n\t table: " + targetTableName + "\n\t edits: " + mutations
-        + "\n\tcause: " + cause == null ? "UNKNOWN" : cause.getMessage(), cause, disableIndexOnFailure);
+    super(cause, disableIndexOnFailure);
     this.table = targetTableName;
+    this.mutationsMsg = mutations.toString();
   }
 
   /**
@@ -60,7 +63,7 @@ public class SingleIndexWriteFailureException extends IndexWriteException {
    * @param message detail message
    */
   public SingleIndexWriteFailureException(String msg) {
-      super(msg, IndexWriteException.parseDisableIndexOnFailure(msg));
+      super(IndexWriteException.parseDisableIndexOnFailure(msg));
       Pattern pattern = Pattern.compile(FAILED_MSG + ".* table: ([\\S]*)\\s.*", Pattern.DOTALL);
       Matcher m = pattern.matcher(msg);
       if (m.find()) {
@@ -75,4 +78,10 @@ public class SingleIndexWriteFailureException extends IndexWriteException {
   public String getTableName() {
     return this.table;
   }
+
+  @Override
+    public String getMessage() {
+      return Objects.firstNonNull(super.getMessage(), "") + " " + FAILED_MSG + "\n\t table: " + this.table + "\n\t edits: " + mutationsMsg
+      + "\n\tcause: " + getCause() == null ? "UNKNOWN" : getCause().getMessage();
+    }
 }
\ No newline at end of file
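
One subtlety worth noting in the getMessage() override above: Java's + binds tighter than the ternary operator, so an unparenthesized "... + getCause() == null ? ... : ..." compares the concatenated string (never null) to null and always takes the else branch, throwing a NullPointerException exactly when the cause is absent. A parenthesized form behaves as intended:

// Sketch of the intended evaluation order; parentheses keep the null check
// on the cause itself rather than on the concatenated string.
public class TernaryPrecedenceDemo {
    static String causeLine(Throwable cause) {
        return "\n\tcause: " + ((cause == null) ? "UNKNOWN" : cause.getMessage());
    }

    public static void main(String[] args) {
        System.out.println(causeLine(null));                          // cause: UNKNOWN
        System.out.println(causeLine(new Exception("region moved"))); // cause: region moved
    }
}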

http://git-wip-us.apache.org/repos/asf/phoenix/blob/21217292/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 14f8307..55192e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -53,6 +54,7 @@ import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.write.DelegateIndexFailurePolicy;
 import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -70,6 +72,9 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
 
 /**
@@ -208,6 +213,19 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 for (String tableName : getLocalIndexNames(ref, mutations)) {
                     indexTableNames.put(tableName, minTimeStamp);
                 }
+                // client disables the index, so we pass the index names in the thrown exception
+                if (cause instanceof MultiIndexWriteFailureException) {
+                    List<HTableInterfaceReference> failedLocalIndexes =
+                            Lists.newArrayList(Iterables.transform(indexTableNames.entrySet(),
+                                new Function<Map.Entry<String, Long>, HTableInterfaceReference>() {
+                                    @Override
+                                    public HTableInterfaceReference apply(Entry<String, Long> input) {
+                                        return new HTableInterfaceReference(new ImmutableBytesPtr(
+                                                Bytes.toBytes(input.getKey())));
+                                    }
+                                }));
+                    ((MultiIndexWriteFailureException) cause).setFailedTables(failedLocalIndexes);
+                }
             } else {
                 indexTableNames.put(ref.getTableName(), minTimeStamp);
             }
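
The Guava transform above exists only to repackage the local-index table names into HTableInterfaceReference objects so they can ride inside the thrown exception; per the comment, it is the client that disables the index, and it learns which ones from getFailedTables(). A hypothetical sketch of that consuming side:

import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;

// Hypothetical consumer: the exception now names the failed index tables, so
// the client can target exactly those for a state transition to DISABLE.
public class FailedIndexConsumer {
    static void onWriteFailure(MultiIndexWriteFailureException e) {
        for (HTableInterfaceReference ref : e.getFailedTables()) {
            System.out.println("candidate for disable: " + ref.getTableName());
        }
    }
}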


[5/7] phoenix git commit: PHOENIX-4588 Clone expression also if its children have Determinism.PER_INVOCATION

Posted by pb...@apache.org.
PHOENIX-4588 Clone expression also if its children have Determinism.PER_INVOCATION


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/92bd6d68
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/92bd6d68
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/92bd6d68

Branch: refs/heads/4.x-cdh5.11.2
Commit: 92bd6d68229b6689e2a97416593708e3cbcb0960
Parents: 3ef7b1f
Author: Ankit Singhal <an...@gmail.com>
Authored: Fri Feb 9 06:16:56 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:56:04 2018 +0000

----------------------------------------------------------------------
 .../org/apache/phoenix/expression/BaseSingleExpression.java     | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/92bd6d68/phoenix-core/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java
index c0e2fea..fbe8040 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java
@@ -113,4 +113,9 @@ public abstract class BaseSingleExpression extends BaseExpression {
     public boolean requiresFinalEvaluation() {
         return children.get(0).requiresFinalEvaluation();
     }
+
+    @Override
+    public Determinism getDeterminism() {
+        return children.get(0).getDeterminism();
+    }
 }
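
The override above makes any single-child wrapper (a cast, a NOT, a coercion) report its child's determinism, which is what tells the planner the expression must be cloned per invocation rather than evaluated once and cached. A small sketch of the kind of check this enables (the helper is hypothetical; only the PER_INVOCATION constant comes from the commit title):

import org.apache.phoenix.expression.Determinism;

// Hypothetical check: an expression whose (possibly inherited) determinism is
// PER_INVOCATION, like a wrapper around RAND(), needs a fresh clone each time.
public class CloneCheck {
    static boolean needsPerInvocationClone(Determinism d) {
        return d == Determinism.PER_INVOCATION;
    }
}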


[4/7] phoenix git commit: PHOENIX-4546 Projected table cannot be read through ProjectedColumnExpression

Posted by pb...@apache.org.
PHOENIX-4546 Projected table cannot be read through ProjectedColumnExpression


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3ef7b1f0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3ef7b1f0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3ef7b1f0

Branch: refs/heads/4.x-cdh5.11.2
Commit: 3ef7b1f021cd931b848d54ff494525b7756fd466
Parents: 2121729
Author: Ankit Singhal <an...@gmail.com>
Authored: Thu Feb 8 07:05:21 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:55:46 2018 +0000

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DeleteIT.java    | 36 ++++++++++++++++++++
 .../compile/TupleProjectionCompiler.java        | 32 ++++++++++++-----
 2 files changed, 60 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3ef7b1f0/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index e111e7a..498aeff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -710,6 +710,41 @@ public class DeleteIT extends ParallelStatsDisabledIT {
     public void testClientSideDeleteAutoCommitOn() throws Exception {
         testDeleteCount(true, 1000);
     }
+
+    @Test
+    public void testPointDeleteWithMultipleImmutableIndexes() throws Exception {
+        testPointDeleteWithMultipleImmutableIndexes(false);
+    }
+
+    @Test
+    public void testPointDeleteWithMultipleImmutableIndexesAfterAlter() throws Exception {
+        testPointDeleteWithMultipleImmutableIndexes(true);
+    }
+
+    private void testPointDeleteWithMultipleImmutableIndexes(boolean alterTable) throws Exception {
+        String tableName = generateUniqueName();
+        String commands = "CREATE TABLE IF NOT EXISTS " + tableName
+                + " (ID INTEGER PRIMARY KEY,double_id DOUBLE,varchar_id VARCHAR (30)) "
+                + (alterTable ? ";ALTER TABLE " + tableName + " set " : "") + "IMMUTABLE_ROWS=true;"
+                + "CREATE INDEX IF NOT EXISTS index_column_varchar_id ON " + tableName + "(varchar_id);"
+                + "CREATE INDEX IF NOT EXISTS index_column_double_id ON " + tableName + "(double_id);" + "UPSERT INTO "
+                + tableName + " VALUES (9000000,0.5,'Sample text extra');" ;
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            conn.setAutoCommit(true);
+            Statement stm = conn.createStatement();
+            for (String sql : commands.split(";")) {
+                stm.execute(sql);
+            }
+            ResultSet rs = stm.executeQuery("select id,varchar_id,double_id from " + tableName + " WHERE ID=9000000");
+            assertTrue(rs.next());
+            assertEquals(9000000, rs.getInt(1));
+            assertEquals("Sample text extra", rs.getString(2));
+            assertEquals(0.5, rs.getDouble(3),0.01);
+            stm.execute("DELETE FROM " + tableName + " WHERE ID=9000000");
+            assertDeleted(conn, tableName, "index_column_varchar_id", "index_column_double_id", null);
+            stm.close();
+        }
+    }
     
     private void testDeleteCount(boolean autoCommit, Integer limit) throws Exception {
         String tableName = generateUniqueName();
@@ -735,6 +770,7 @@ public class DeleteIT extends ParallelStatsDisabledIT {
         }
 
     }
+    
 
     @Test
     public void testClientSideDeleteShouldNotFailWhenSameColumnPresentInMultipleIndexes()

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3ef7b1f0/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index 9883de6..d0b900c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -22,6 +22,7 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -103,6 +104,10 @@ public class TupleProjectionCompiler {
                     for (PColumn column : parentTableRef.getTable().getColumnFamily(familyName).getColumns()) {
                         NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor);
                     }
+                }else{
+                    for (PColumn column : table.getColumnFamily(familyName).getColumns()) {
+                        NODE_FACTORY.column(TableName.create(null, familyName), '"' + column.getName().getString() + '"', null).accept(visitor);
+                    }
                 }
                 families.add(familyName);
             } else {
@@ -125,16 +130,22 @@ public class TupleProjectionCompiler {
                     position++, sourceColumn.isNullable(), sourceColumnRef, null);
             projectedColumns.add(column);
         }
+
+        List<ColumnRef> nonPkColumnRefList = new ArrayList<ColumnRef>(visitor.nonPkColumnRefSet);
         for (PColumn sourceColumn : table.getColumns()) {
             if (SchemaUtil.isPKColumn(sourceColumn))
                 continue;
             ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition());
             if (!isWildcard 
-                    && !visitor.columnRefSet.contains(sourceColumnRef)
+                    && !visitor.nonPkColumnRefSet.contains(sourceColumnRef)
                     && !families.contains(sourceColumn.getFamilyName().getString()))
                 continue;
-            PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), 
-                    position++, sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes());
+
+            PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(),
+                    visitor.nonPkColumnRefSet.contains(sourceColumnRef)
+                            ? position + nonPkColumnRefList.indexOf(sourceColumnRef) : position++,
+                    sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes());
+
             projectedColumns.add(column);
             // Wildcard or FamilyWildcard will be handled by ProjectionCompiler.
             if (!isWildcard && !families.contains(sourceColumn.getFamilyName())) {
@@ -142,6 +153,7 @@ public class TupleProjectionCompiler {
             }
         }
         // add LocalIndexDataColumnRef
+        position = projectedColumns.size();
         for (LocalIndexDataColumnRef sourceColumnRef : visitor.localIndexColumnRefSet) {
             PColumn column = new ProjectedColumn(sourceColumnRef.getColumn().getName(), 
                     sourceColumnRef.getColumn().getFamilyName(), position++, 
@@ -192,19 +204,23 @@ public class TupleProjectionCompiler {
     // For extracting column references from single select statement
     private static class ColumnRefVisitor extends StatelessTraverseAllParseNodeVisitor {
         private final StatementContext context;
-        private final Set<ColumnRef> columnRefSet;
-        private final Set<LocalIndexDataColumnRef> localIndexColumnRefSet;
+        private final LinkedHashSet<ColumnRef> nonPkColumnRefSet;
+        private final LinkedHashSet<LocalIndexDataColumnRef> localIndexColumnRefSet;
         
         private ColumnRefVisitor(StatementContext context) {
             this.context = context;
-            this.columnRefSet = new HashSet<ColumnRef>();
-            this.localIndexColumnRefSet = new HashSet<LocalIndexDataColumnRef>();
+            this.nonPkColumnRefSet = new LinkedHashSet<ColumnRef>();
+            this.localIndexColumnRefSet = new LinkedHashSet<LocalIndexDataColumnRef>();
         }
 
         @Override
         public Void visit(ColumnParseNode node) throws SQLException {
             try {
-                columnRefSet.add(context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()));
+                ColumnRef resolveColumn = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(),
+                        node.getName());
+                if (!SchemaUtil.isPKColumn(resolveColumn.getColumn())) {
+                    nonPkColumnRefSet.add(resolveColumn);
+                }
             } catch (ColumnNotFoundException e) {
                 if (context.getCurrentTable().getTable().getIndexType() == IndexType.LOCAL) {
                     try {
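
A note on the HashSet-to-LinkedHashSet switch above: projected positions are computed via indexOf() on a List copied from the set, so iteration order must match first-reference order or column positions would vary from run to run. A standalone illustration (column names borrowed from the DeleteIT test above):

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;

// LinkedHashSet preserves first-insertion order, so indexOf() over a copy of
// it yields stable, deterministic positions; a plain HashSet would not.
public class StablePositionDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> refs = new LinkedHashSet<>();
        refs.add("DOUBLE_ID");
        refs.add("VARCHAR_ID");
        refs.add("DOUBLE_ID"); // re-reference keeps the original slot
        List<String> ordered = new ArrayList<>(refs);
        System.out.println(ordered.indexOf("VARCHAR_ID")); // always prints 1
    }
}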


[2/7] phoenix git commit: PHOENIX-4130 Avoid server retries for mutable indexes

Posted by pb...@apache.org.
PHOENIX-4130 Avoid server retries for mutable indexes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b539cd62
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b539cd62
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b539cd62

Branch: refs/heads/4.x-cdh5.11.2
Commit: b539cd625bec0d625ddd1dfd61d3b4f58abeabd2
Parents: 00940b3
Author: Vincent Poon <vi...@apache.org>
Authored: Mon Jan 29 23:06:12 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:54:05 2018 +0000

----------------------------------------------------------------------
 .../end2end/index/MutableIndexFailureIT.java    |  12 +-
 .../end2end/index/PartialIndexRebuilderIT.java  |  76 ++++++--
 .../coprocessor/MetaDataEndpointImpl.java       |  53 ++++--
 .../phoenix/coprocessor/MetaDataProtocol.java   |   6 +-
 .../coprocessor/MetaDataRegionObserver.java     |  19 +-
 .../UngroupedAggregateRegionObserver.java       |  82 ++++++--
 .../phoenix/exception/SQLExceptionCode.java     |   1 +
 .../apache/phoenix/execute/MutationState.java   |  39 +++-
 .../org/apache/phoenix/hbase/index/Indexer.java |  10 -
 .../index/exception/IndexWriteException.java    |  49 ++++-
 .../MultiIndexWriteFailureException.java        |  29 ++-
 .../SingleIndexWriteFailureException.java       |  23 ++-
 .../hbase/index/write/IndexWriterUtils.java     |  14 +-
 .../write/ParallelWriterIndexCommitter.java     |   5 +-
 .../TrackingParallelWriterIndexCommitter.java   |   5 +-
 .../index/PhoenixIndexFailurePolicy.java        | 189 +++++++++++++++++--
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   1 +
 .../apache/phoenix/optimize/QueryOptimizer.java |  29 ++-
 .../org/apache/phoenix/query/QueryServices.java |   2 +
 .../phoenix/query/QueryServicesOptions.java     |   1 +
 .../org/apache/phoenix/schema/PIndexState.java  |   7 +-
 .../org/apache/phoenix/util/KeyValueUtil.java   |  12 ++
 .../org/apache/phoenix/util/ServerUtil.java     |  23 ++-
 .../hbase/index/write/TestIndexWriter.java      |   8 +
 .../index/write/TestParalleIndexWriter.java     |   6 +
 .../write/TestParalleWriterIndexCommitter.java  |   6 +
 26 files changed, 591 insertions(+), 116 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 0318925..c2e0cb6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -29,7 +29,6 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -130,7 +129,6 @@ public class MutableIndexFailureIT extends BaseTest {
     public static void doSetup() throws Exception {
         Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
         serverProps.put("hbase.coprocessor.region.classes", FailingRegionObserver.class.getName());
-        serverProps.put(IndexWriterUtils.INDEX_WRITER_RPC_RETRIES_NUMBER, "2");
         serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "10000");
         serverProps.put(IndexWriterUtils.INDEX_WRITER_RPC_PAUSE, "5000");
         serverProps.put("data.tx.snapshot.dir", "/tmp");
@@ -144,7 +142,8 @@ public class MutableIndexFailureIT extends BaseTest {
          * because we want to control it's execution ourselves
          */
         serverProps.put(QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY, Long.toString(Long.MAX_VALUE));
-        Map<String, String> clientProps = Collections.singletonMap(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+        clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
         NUM_SLAVES_BASE = 4;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
         indexRebuildTaskRegionEnvironment =
@@ -161,7 +160,8 @@ public class MutableIndexFailureIT extends BaseTest {
     @Parameters(name = "MutableIndexFailureIT_transactional={0},localIndex={1},isNamespaceMapped={2},disableIndexOnWriteFailure={3},failRebuildTask={4},throwIndexWriteFailure={5}") // name is used by failsafe as file name in reports
     public static List<Object[]> data() {
         return Arrays.asList(new Object[][] { 
-                { false, false, false, true, false, false},
+                // note - can't disableIndexOnWriteFailure without throwIndexWriteFailure, PHOENIX-4130
+                { false, false, false, false, false, false},
                 { false, false, true, true, false, null},
                 { false, false, true, true, false, true},
                 { false, false, false, true, false, null},
@@ -181,8 +181,8 @@ public class MutableIndexFailureIT extends BaseTest {
                 { false, true, false, true, false, null},
                 { false, false, false, true, true, null},
                 { false, false, true, true, true, null},
-                { false, false, false, true, true, false},
-                { false, false, true, true, true, false},
+                { false, false, false, false, true, false},
+                { false, false, true, false, true, false},
                 } 
         );
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 31649bd..dd986aa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -46,12 +47,13 @@ import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
-import org.apache.phoenix.exception.PhoenixIOException;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PTable;
@@ -94,7 +96,9 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         serverProps.put(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, "50000000");
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, Long.toString(REBUILD_PERIOD)); // batch at 50 seconds
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
-        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
         indexRebuildTaskRegionEnvironment =
                 (RegionCoprocessorEnvironment) getUtility()
                         .getRSForFirstRegionInTable(
@@ -1027,6 +1031,51 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         }
     }
 
+    // Tests that when we've been in PENDING_DISABLE for too long, queries don't use the index,
+    // and the rebuilder should mark the index DISABLED
+    @Test
+    public void testPendingDisable() throws Throwable {
+        String schemaName = generateUniqueName();
+        String tableName = generateUniqueName();
+        String indexName = generateUniqueName();
+        final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        final MyClock clock = new MyClock(1000);
+        EnvironmentEdgeManager.injectEdge(clock);
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR) COLUMN_ENCODED_BYTES = 0, DISABLE_INDEX_ON_WRITE_FAILURE = TRUE");
+            clock.time += 100;
+            conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1, v2)");
+            clock.time += 100;
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
+            conn.commit();
+            clock.time += 100;
+            HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+            IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.PENDING_DISABLE);
+            Configuration conf =
+                    conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
+            // under threshold should use the index
+            PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+            ResultSet rs = stmt.executeQuery("SELECT V2 FROM " + fullTableName + " WHERE V1 = 'a'");
+            assertTrue(rs.next());
+            assertEquals("0", rs.getString(1));
+            assertEquals(fullIndexName, stmt.getQueryPlan().getContext().getCurrentTable().getTable().getName().getString());
+            // over threshold should not use the index
+            long pendingDisableThreshold = conf.getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
+                QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
+            clock.time += pendingDisableThreshold + 1000;
+            stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+            rs = stmt.executeQuery("SELECT V2 FROM " + fullTableName + " WHERE V1 = 'a'");
+            assertTrue(rs.next());
+            assertEquals("0", rs.getString(1));
+            assertEquals(fullTableName, stmt.getQueryPlan().getContext().getCurrentTable().getTable().getName().getString());
+            // if we're over the threshold, the rebuilder should disable the index
+            waitForIndexState(conn, fullTableName, fullIndexName, PIndexState.DISABLE);
+        } finally {
+            EnvironmentEdgeManager.reset();
+        }
+    }
+
     //Tests that when we're updating an index from within the RS (e.g. UngruopedAggregateRegionObserver),
     // if the index write fails the index gets disabled
     @Test
@@ -1048,22 +1097,9 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
                 try {
                     conn.createStatement().execute("DELETE FROM " + fullTableName);
                     fail();
-                } catch (CommitException|PhoenixIOException e) {
-                    // Expected
-                }
-                assertTrue(TestUtil.checkIndexState(conn, fullIndexName, PIndexState.DISABLE, null));
-                // reset the index state to ACTIVE
-                HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-                IndexUtil.updateIndexState(fullIndexName, 0, metaTable, PIndexState.INACTIVE);
-                IndexUtil.updateIndexState(fullIndexName, 0, metaTable, PIndexState.ACTIVE);
-                TestUtil.removeCoprocessor(conn, fullIndexName, WriteFailingRegionObserver.class);
-                conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0', 't')");
-                TestUtil.addCoprocessor(conn, fullIndexName, WriteFailingRegionObserver.class);
-                try {
-                    conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE v1='a'");
-                    fail();
-                } catch (CommitException|PhoenixIOException e) {
+                } catch (SQLException e) {
                     // Expected
+                    assertEquals(SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode(), e.getErrorCode());
                 }
                 assertTrue(TestUtil.checkIndexState(conn, fullIndexName, PIndexState.DISABLE, null));
             } finally {
@@ -1075,6 +1111,12 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
     public static class WriteFailingRegionObserver extends SimpleRegionObserver {
         @Override
         public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+            // we need to advance the clock, since the index retry logic (copied from HBase) has a time component
+            EnvironmentEdge delegate = EnvironmentEdgeManager.getDelegate();
+            if (delegate instanceof MyClock) {
+                MyClock myClock = (MyClock) delegate;
+                myClock.time += 1000;
+            }
             throw new DoNotRetryIOException("Simulating write failure on " + c.getEnvironment().getRegionInfo().getTable().getNameAsString());
         }
     }
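
The MyClock/EnvironmentEdgeManager pattern used throughout the test above is how time-threshold behavior (PENDING_DISABLE aging, the retry backoff in WriteFailingRegionObserver) is exercised without sleeping: all Phoenix time reads funnel through the injected edge. A condensed sketch mirroring the test's MyClock, assuming Phoenix's org.apache.phoenix.util.EnvironmentEdge classes:

import org.apache.phoenix.util.EnvironmentEdge;
import org.apache.phoenix.util.EnvironmentEdgeManager;

// Condensed from the test above: bumping 'time' fast-forwards every
// EnvironmentEdgeManager.currentTimeMillis() call past a threshold.
public class InjectedClockDemo {
    static class MyClock extends EnvironmentEdge {
        public volatile long time;
        MyClock(long start) { this.time = start; }
        @Override
        public long currentTime() { return time; }
    }

    public static void main(String[] args) {
        MyClock clock = new MyClock(1000);
        EnvironmentEdgeManager.injectEdge(clock);
        try {
            clock.time += 100_000; // jump past the pending-disable threshold
            System.out.println(EnvironmentEdgeManager.currentTimeMillis()); // 101000
        } finally {
            EnvironmentEdgeManager.reset(); // as the test's finally block does
        }
    }
}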

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 47ad7cf..d08db27 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -552,22 +552,27 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     TableName.valueOf(table.getPhysicalName().getBytes()));
 
             builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
-            long disableIndexTimestamp = table.getIndexDisableTimestamp();
-            long minNonZerodisableIndexTimestamp = disableIndexTimestamp > 0 ? disableIndexTimestamp : Long.MAX_VALUE;
-            for (PTable index : table.getIndexes()) {
-                disableIndexTimestamp = index.getIndexDisableTimestamp();
-                if (disableIndexTimestamp > 0 && (index.getIndexState() == PIndexState.ACTIVE || index.getIndexState() == PIndexState.PENDING_ACTIVE) && disableIndexTimestamp < minNonZerodisableIndexTimestamp) {
-                    minNonZerodisableIndexTimestamp = disableIndexTimestamp;
+            builder.setMutationTime(currentTime);
+            if (blockWriteRebuildIndex) {
+                long disableIndexTimestamp = table.getIndexDisableTimestamp();
+                long minNonZerodisableIndexTimestamp = disableIndexTimestamp > 0 ? disableIndexTimestamp : Long.MAX_VALUE;
+                for (PTable index : table.getIndexes()) {
+                    disableIndexTimestamp = index.getIndexDisableTimestamp();
+                    if (disableIndexTimestamp > 0
+                            && (index.getIndexState() == PIndexState.ACTIVE
+                                    || index.getIndexState() == PIndexState.PENDING_ACTIVE
+                                    || index.getIndexState() == PIndexState.PENDING_DISABLE)
+                            && disableIndexTimestamp < minNonZerodisableIndexTimestamp) {
+                        minNonZerodisableIndexTimestamp = disableIndexTimestamp;
+                    }
+                }
+                // Freeze time for table at min non-zero value of INDEX_DISABLE_TIMESTAMP
+                // This will keep the table consistent with index as the table has had one more
+                // batch applied to it.
+                if (minNonZerodisableIndexTimestamp != Long.MAX_VALUE) {
+                    // Subtract one because we add one due to timestamp granularity in Windows
+                    builder.setMutationTime(minNonZerodisableIndexTimestamp - 1);
                 }
-            }
-            // Freeze time for table at min non-zero value of INDEX_DISABLE_TIMESTAMP
-            // This will keep the table consistent with index as the table has had one more
-            // batch applied to it.
-            if (minNonZerodisableIndexTimestamp == Long.MAX_VALUE) {
-                builder.setMutationTime(currentTime);
-            } else {
-                // Subtract one because we add one due to timestamp granularity in Windows
-                builder.setMutationTime(minNonZerodisableIndexTimestamp - 1);
             }
 
             if (table.getTimeStamp() != tableTimeStamp) {
@@ -932,6 +937,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         if (indexState == PIndexState.PENDING_ACTIVE && clientVersion < PhoenixDatabaseMetaData.MIN_PENDING_ACTIVE_INDEX) {
             indexState = PIndexState.ACTIVE;
         }
+        // If client is not yet up to 4.14, then translate PENDING_DISABLE to DISABLE
+        // since the client won't have this index state in its enum.
+        if (indexState == PIndexState.PENDING_DISABLE && clientVersion < PhoenixDatabaseMetaData.MIN_PENDING_DISABLE_INDEX) {
+            // note: for older clients, we have to rely on the rebuilder to transition PENDING_DISABLE -> DISABLE
+            indexState = PIndexState.DISABLE;
+        }
         Cell immutableRowsKv = tableKeyValues[IMMUTABLE_ROWS_INDEX];
         boolean isImmutableRows =
                 immutableRowsKv == null ? false : (Boolean) PBoolean.INSTANCE.toObject(
@@ -3663,6 +3674,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // Timestamp of INDEX_STATE gets updated with each call
                 long actualTimestamp = currentStateKV.getTimestamp();
                 long curTimeStampVal = 0;
+                long newDisableTimeStamp = 0;
                 if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0)) {
                     curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(),
                             currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
@@ -3679,7 +3691,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             done.run(builder.build());
                             return;
                         }
-                        long newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(),
+                        newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(),
                                 newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
                         // We use the sign of the INDEX_DISABLE_TIMESTAMP to differentiate the keep-index-active (negative)
                         // from block-writes-to-data-table case. In either case, we want to keep the oldest timestamp to
@@ -3688,7 +3700,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         // We do legitimately move the INDEX_DISABLE_TIMESTAMP to be newer when we're rebuilding the
                         // index in which case the state will be INACTIVE or PENDING_ACTIVE.
                         if (curTimeStampVal != 0 
-                                && (newState == PIndexState.DISABLE || newState == PIndexState.PENDING_ACTIVE) 
+                                && (newState == PIndexState.DISABLE || newState == PIndexState.PENDING_ACTIVE || newState == PIndexState.PENDING_DISABLE)
                                 && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) {
                             // do not reset disable timestamp as we want to keep the min
                             newKVs.remove(disableTimeStampKVIndex);
@@ -3717,6 +3729,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     if (newState == PIndexState.ACTIVE) {
                         newState = PIndexState.DISABLE;
                     }
+                    // Can't transition from DISABLE to PENDING_DISABLE
+                    if (newState == PIndexState.PENDING_DISABLE) {
+                        builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
+                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+                        done.run(builder.build());
+                        return;
+                    }
                 }
 
                 if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) {
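
The PENDING_DISABLE handling above follows the same compatibility convention as the PENDING_ACTIVE translation earlier in the method: an index state unknown to an older client is mapped to the nearest state its enum does contain before being returned. A minimal sketch of that gate (the version constant is passed in rather than asserted, since its value is not shown in this diff):

import org.apache.phoenix.schema.PIndexState;

// Sketch of the version gate: pre-4.14 clients have no PENDING_DISABLE value,
// so they are shown DISABLE and the rebuilder completes the real transition.
public class ClientStateTranslator {
    static PIndexState forClient(PIndexState state, int clientVersion,
            int minPendingDisableVersion) {
        if (state == PIndexState.PENDING_DISABLE
                && clientVersion < minPendingDisableVersion) {
            return PIndexState.DISABLE;
        }
        return state;
    }
}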

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index fe11ec7..efad1e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -63,7 +63,7 @@ import com.google.protobuf.ByteString;
  */
 public abstract class MetaDataProtocol extends MetaDataService {
     public static final int PHOENIX_MAJOR_VERSION = 4;
-    public static final int PHOENIX_MINOR_VERSION = 13;
+    public static final int PHOENIX_MINOR_VERSION = 14;
     public static final int PHOENIX_PATCH_NUMBER = 0;
     public static final int PHOENIX_VERSION =
             VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER);
@@ -93,8 +93,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
     // Since there's no upgrade code, keep the version the same as the previous version
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
-    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0;
+    public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0;
     
     // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). 
     // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
@@ -114,6 +115,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0, "4.12.x");
         TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0, "4.13.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0, "4.14.x");
     }
     
     public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; 

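The version bump above flows into VersionUtil.encodeVersion, which packs major/minor/patch into one comparable int. A hedged sketch of that idea (one byte per component is an assumption here, not a verified copy of VersionUtil):

    // Illustrative packing in the spirit of VersionUtil.encodeVersion; the exact
    // Phoenix bit layout is assumed, not copied.
    final class VersionPackingSketch {
        static int encode(int major, int minor, int patch) {
            return (major << 16) | (minor << 8) | patch; // one byte per component (assumed)
        }

        public static void main(String[] args) {
            // Packed ints compare in release order, so gates such as
            // MIN_PENDING_DISABLE_INDEX reduce to an integer comparison.
            System.out.println(encode(4, 14, 0) > encode(4, 13, 0)); // true
        }
    }
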
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index af06235..e51a61e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -229,6 +229,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         private final long rebuildIndexBatchSize;
         private final long configuredBatches;
         private final long indexDisableTimestampThreshold;
+        private final long pendingDisableThreshold;
         private final ReadOnlyProps props;
         private final List<String> onlyTheseTables;
 
@@ -247,6 +248,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
             this.indexDisableTimestampThreshold =
                     configuration.getLong(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD,
                         QueryServicesOptions.DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD);
+            this.pendingDisableThreshold =
+                    configuration.getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
+                        QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
             this.props = new ReadOnlyProps(env.getConfiguration().iterator());
         }
 
@@ -342,6 +346,18 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                     }
                     
                     PIndexState indexState = PIndexState.fromSerializedValue(indexStateBytes[0]);
+                    long elapsedSinceDisable = EnvironmentEdgeManager.currentTimeMillis() - Math.abs(indexDisableTimestamp);
+
+                    // on an index write failure, the server side transitions to PENDING_DISABLE, then the client
+                    // retries, and after retries are exhausted, disables the index
+                    if (indexState == PIndexState.PENDING_DISABLE) {
+                        if (elapsedSinceDisable > pendingDisableThreshold) {
+                            // too long in PENDING_DISABLE - client didn't disable the index, so we do it here
+                            IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, indexDisableTimestamp);
+                        }
+                        continue;
+                    }
+
                     // Only perform relatively expensive check for all regions online when index
                     // is disabled or pending active since that's the state it's placed into when
                     // an index write fails.
@@ -351,7 +367,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                 + indexPTable.getName() + " are online.");
                         continue;
                     }
-                    if (EnvironmentEdgeManager.currentTimeMillis() - Math.abs(indexDisableTimestamp) > indexDisableTimestampThreshold) {
+
+                    if (elapsedSinceDisable > indexDisableTimestampThreshold) {
                         /*
                          * It has been too long since the index has been disabled and any future
                          * attempts to reenable it likely will fail. So we are going to mark the

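The rebuilder change above gives the client a grace period: the client owns the PENDING_DISABLE to DISABLE (or back to ACTIVE) decision, and the rebuilder only steps in once the state has lingered past pendingDisableThreshold. A sketch of that check, with illustrative names:

    // Sketch of the rebuilder's PENDING_DISABLE decision above; names are illustrative.
    class PendingDisableSketch {
        /** @return true when the client apparently died and the rebuilder must disable the index */
        static boolean rebuilderShouldDisable(long indexDisableTimestamp,
                long pendingDisableThresholdMs, long nowMs) {
            // the timestamp's sign is only a flag, so measure elapsed time from its magnitude
            long elapsedSinceDisable = nowMs - Math.abs(indexDisableTimestamp);
            return elapsedSinceDisable > pendingDisableThresholdMs;
        }
    }
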
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index af50420..7692bc8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -53,22 +53,18 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.CoprocessorHConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.InterRegionServerIndexRpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -81,9 +77,9 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.cache.ServerCacheClient;
-import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
 import org.apache.phoenix.coprocessor.generated.PTableProtos;
 import org.apache.phoenix.exception.DataExceedsCapacityException;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
@@ -92,13 +88,15 @@ import org.apache.phoenix.expression.aggregator.Aggregators;
 import org.apache.phoenix.expression.aggregator.ServerAggregators;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy.MutateCommand;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.join.HashJoinInfo;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -136,22 +134,19 @@ import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
-import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.StringUtil;
-import org.apache.phoenix.util.TimeKeeper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Predicate;
 import com.google.common.base.Throwables;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
@@ -205,6 +200,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     private KeyValueBuilder kvBuilder;
     private Configuration upsertSelectConfig;
     private Configuration compactionConfig;
+    private Configuration indexWriteConfig;
+    private ReadOnlyProps indexWriteProps;
 
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
@@ -234,6 +231,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         compactionConfig.setInt(HConstants.HBASE_CLIENT_PAUSE,
             e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRY_PAUSE,
                 QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRY_PAUSE));
+
+        // For retries of index write failures, use the same # of retries as the rebuilder
+        indexWriteConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
+        indexWriteConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+            e.getConfiguration().getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER,
+                QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER));
+        indexWriteProps = new ReadOnlyProps(indexWriteConfig.iterator());
     }
 
     private void commitBatch(Region region, List<Mutation> mutations, long blockingMemstoreSize) throws IOException {
@@ -254,7 +258,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
           }
       }
       // TODO: should we use the one that is all or none?
-      logger.debug("Committing bactch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
+      logger.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
       region.batchMutate(mutations.toArray(mutationArray), HConstants.NO_NONCE, HConstants.NO_NONCE);
     }
 
@@ -860,21 +864,65 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         }
     }
 
-    private void commit(Region region, List<Mutation> mutations, byte[] indexUUID, long blockingMemStoreSize,
-            byte[] indexMaintainersPtr, byte[] txState, HTable targetHTable, boolean useIndexProto,
+    private void commit(final Region region, List<Mutation> mutations, byte[] indexUUID, final long blockingMemStoreSize,
+            byte[] indexMaintainersPtr, byte[] txState, final HTable targetHTable, boolean useIndexProto,
                         boolean isPKChanging)
             throws IOException {
-        List<Mutation> localRegionMutations = Lists.newArrayList();
-        List<Mutation> remoteRegionMutations = Lists.newArrayList();
+        final List<Mutation> localRegionMutations = Lists.newArrayList();
+        final List<Mutation> remoteRegionMutations = Lists.newArrayList();
         setIndexAndTransactionProperties(mutations, indexUUID, indexMaintainersPtr, txState, useIndexProto);
         separateLocalAndRemoteMutations(targetHTable, region, mutations, localRegionMutations, remoteRegionMutations,
             isPKChanging);
-        commitBatch(region, localRegionMutations, blockingMemStoreSize);
-        commitBatchWithHTable(targetHTable, remoteRegionMutations);
+        try {
+            commitBatch(region, localRegionMutations, blockingMemStoreSize);
+        } catch (IOException e) {
+            handleIndexWriteException(localRegionMutations, e, new MutateCommand() {
+                @Override
+                public void doMutation() throws IOException {
+                    commitBatch(region, localRegionMutations, blockingMemStoreSize);
+                }
+            });
+        }
+        try {
+            commitBatchWithHTable(targetHTable, remoteRegionMutations);
+        } catch (IOException e) {
+            handleIndexWriteException(remoteRegionMutations, e, new MutateCommand() {
+                @Override
+                public void doMutation() throws IOException {
+                    commitBatchWithHTable(targetHTable, remoteRegionMutations);
+                }
+            });
+        }
         localRegionMutations.clear();
         remoteRegionMutations.clear();
     }
 
+    private void handleIndexWriteException(final List<Mutation> localRegionMutations, IOException origIOE,
+            MutateCommand mutateCommand) throws IOException {
+        long serverTimestamp = ServerUtil.parseTimestampFromRemoteException(origIOE);
+        SQLException inferredE = ServerUtil.parseLocalOrRemoteServerException(origIOE);
+        if (inferredE != null && inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) {
+            // For an index write failure, the data table write succeeded,
+            // so when we retry we need to set REPLAY_WRITES
+            for (Mutation mutation : localRegionMutations) {
+                mutation.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
+                // use the server timestamp for index write retries
+                KeyValueUtil.setTimestamp(mutation, serverTimestamp);
+            }
+            IndexWriteException iwe = PhoenixIndexFailurePolicy.getIndexWriteException(inferredE);
+            try (PhoenixConnection conn =
+                    QueryUtil.getConnectionOnServer(indexWriteConfig)
+                            .unwrap(PhoenixConnection.class)) {
+                PhoenixIndexFailurePolicy.doBatchWithRetries(mutateCommand, iwe, conn,
+                    indexWriteProps);
+            } catch (Exception e) {
+                throw new DoNotRetryIOException(e);
+            }
+        } else {
+            throw origIOE;
+        }
+    }
+
     private void separateLocalAndRemoteMutations(HTable targetHTable, Region region, List<Mutation> mutations,
                                                  List<Mutation> localRegionMutations, List<Mutation> remoteRegionMutations,
                                                  boolean isPKChanging){

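The commit() rewrite above turns each batch into a retryable callback, which is why the mutation lists and blockingMemStoreSize become final: the anonymous MutateCommand closes over them. A self-contained sketch of the same pre-Java-8 pattern (Retrier and its simple loop are illustrative; the real policy lives in doBatchWithRetries):

    import java.io.IOException;
    import java.util.List;

    // Illustrative callback-retry pattern; not the Phoenix MutateCommand itself.
    interface MutateCommandSketch {
        void doMutation() throws IOException;
    }

    class Retrier {
        static void runWithRetries(MutateCommandSketch cmd, int maxTries) throws IOException {
            IOException last = null;
            for (int i = 0; i < maxTries; i++) {
                try {
                    cmd.doMutation();
                    return;
                } catch (IOException e) {
                    last = e; // the real code also checks for INDEX_WRITE_FAILURE first
                }
            }
            throw last; // maxTries >= 1 assumed, so last is non-null here
        }

        static void commit(final List<String> batch) throws IOException {
            // the command must close over (effectively) final locals, hence the
            // added final modifiers on the commit() parameters in the diff
            runWithRetries(new MutateCommandSketch() {
                @Override
                public void doMutation() throws IOException {
                    System.out.println("replaying " + batch.size() + " mutations");
                }
            }, 3);
        }
    }
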
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 2301c32..0f29f3f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -367,6 +367,7 @@ public enum SQLExceptionCode {
     CONNECTION_CLOSED(1111, "XCL11", "Connection is closed."),
 
     INDEX_FAILURE_BLOCK_WRITE(1120, "XCL20", "Writes to table blocked until index can be updated."),
+    INDEX_WRITE_FAILURE(1121, "XCL21", "Write to the index failed."),
     
     UPDATE_CACHE_FREQUENCY_INVALID(1130, "XCL30", "UPDATE_CACHE_FREQUENCY cannot be set to ALWAYS if APPEND_ONLY_SCHEMA is true."),
     CANNOT_DROP_COL_APPEND_ONLY_SCHEMA(1131, "XCL31", "Cannot drop column from a table with append only schema."),

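Callers recognize the new code by walking the cause chain rather than string parsing whenever the SQLException is available locally. A hedged sketch of that check (the helper mirrors ServerUtil.parseLocalOrRemoteServerException in spirit; 1121 is the code added above):

    import java.sql.SQLException;

    // Illustrative cause-chain walk; not the ServerUtil implementation.
    class IndexWriteFailureCheck {
        static final int INDEX_WRITE_FAILURE_CODE = 1121; // XCL21 above

        static boolean isIndexWriteFailure(Throwable t) {
            while (t != null) {
                if (t instanceof SQLException
                        && ((SQLException) t).getErrorCode() == INDEX_WRITE_FAILURE_CODE) {
                    return true;
                }
                t = t.getCause();
            }
            return false;
        }
    }
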
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 993438e..0719966 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -53,11 +53,14 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.index.IndexMetaDataCacheClient;
 import org.apache.phoenix.index.PhoenixIndexBuilder;
 import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy.MutateCommand;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.monitoring.GlobalClientMetrics;
@@ -983,6 +986,8 @@ public class MutationState implements SQLCloseable {
                 long mutationCommitTime = 0;
                 long numFailedMutations = 0;
                 long startTime = 0;
+                boolean shouldRetryIndexedMutation = false;
+                IndexWriteException iwe = null;
                 do {
                     TableRef origTableRef = tableInfo.getOrigTableRef();
                     PTable table = origTableRef.getTable();
@@ -1016,8 +1021,25 @@ public class MutationState implements SQLCloseable {
                         startTime = System.currentTimeMillis();
                         child.addTimelineAnnotation("Attempt " + retryCount);
                         List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
-                        for (List<Mutation> mutationBatch : mutationBatchList) {
-                            hTable.batch(mutationBatch);
+                        for (final List<Mutation> mutationBatch : mutationBatchList) {
+                            if (shouldRetryIndexedMutation) {
+                                // if there was an index write failure, retry the mutation in a loop
+                                final HTableInterface finalHTable = hTable;
+                                PhoenixIndexFailurePolicy.doBatchWithRetries(new MutateCommand() {
+                                    @Override
+                                    public void doMutation() throws IOException {
+                                        try {
+                                            finalHTable.batch(mutationBatch);
+                                        } catch (InterruptedException e) {
+                                            Thread.currentThread().interrupt();
+                                            throw new IOException(e);
+                                        }
+                                    }}, iwe,
+                                    connection, connection.getQueryServices().getProps());
+                            } else {
+                                hTable.batch(mutationBatch);
+                            }
+
                             batchCount++;
                             if (logger.isDebugEnabled()) logger.debug("Sent batch of " + mutationBatch.size() + " for " + Bytes.toString(htableName));
                         }
@@ -1054,6 +1076,19 @@ public class MutationState implements SQLCloseable {
                                 child = Tracing.child(span,"Failed batch, attempting retry");
 
                                 continue;
+                            } else if (inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) {
+                                iwe = PhoenixIndexFailurePolicy.getIndexWriteException(inferredE);
+                                if (iwe != null && !shouldRetryIndexedMutation) {
+                                    // For an index write failure, the data table write succeeded,
+                                    // so when we retry we need to set REPLAY_WRITES
+                                    for (Mutation m : mutationList) {
+                                        m.setAttribute(BaseScannerRegionObserver.REPLAY_WRITES, BaseScannerRegionObserver.REPLAY_ONLY_INDEX_WRITES);
+                                        KeyValueUtil.setTimestamp(m, serverTimestamp);
+                                    }
+                                    shouldRetry = true;
+                                    shouldRetryIndexedMutation = true;
+                                    continue;
+                                }
                             }
                             e = inferredE;
                         }

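The MutationState change above splits the client commit into two passes: the first INDEX_WRITE_FAILURE flags the batch for index-only replay at the server timestamp, and the retry pass then runs inside doBatchWithRetries. A compressed control-flow sketch (stand-in types, not MutationState code):

    // Control-flow sketch only; Runnable stands in for the real batch calls.
    class ClientCommitSketch {
        void commit(Runnable firstPass, Runnable retryPass) {
            try {
                firstPass.run();   // hTable.batch(mutationBatch)
            } catch (RuntimeException e) {
                // stand-in for the INDEX_WRITE_FAILURE branch: tag mutations with
                // REPLAY_ONLY_INDEX_WRITES, rewrite cell timestamps to the server
                // timestamp, then take the retry path exactly once
                retryPass.run();   // PhoenixIndexFailurePolicy.doBatchWithRetries(...)
            }
        }
    }
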
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 9686789..f8195f1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -185,7 +185,6 @@ public class Indexer extends BaseRegionObserver {
   private long slowPostOpenThreshold;
   private long slowPreIncrementThreshold;
   private int rowLockWaitDuration;
-  private Configuration compactionConfig;
   
   public static final String RecoveryFailurePolicyKeyForTesting = INDEX_RECOVERY_FAILURE_POLICY_KEY;
 
@@ -242,15 +241,6 @@ public class Indexer extends BaseRegionObserver {
         this.metricSource = MetricsIndexerSourceFactory.getInstance().create();
         setSlowThresholds(e.getConfiguration());
 
-        compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
-        // lower the number of rpc retries, so we don't hang the compaction
-        compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-            e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER,
-                QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER));
-        compactionConfig.setInt(HConstants.HBASE_CLIENT_PAUSE,
-            e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRY_PAUSE,
-                QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRY_PAUSE));
-
         try {
           // get the specified failure policy. We only ever override it in tests, but we need to do it
           // here

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
index 2ec29bc..531baa6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java
@@ -17,7 +17,10 @@
  */
 package org.apache.phoenix.hbase.index.exception;
 
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.phoenix.query.QueryServicesOptions;
 
 /**
  * Generic {@link Exception} that an index write has failed
@@ -25,19 +28,57 @@ import org.apache.hadoop.hbase.HBaseIOException;
 @SuppressWarnings("serial")
 public class IndexWriteException extends HBaseIOException {
 
+    /*
+     * We pass this message back to the client so that the config only needs to be set on the
+     * server side.
+     */
+    private static final String DISABLE_INDEX_ON_FAILURE_MSG = "disableIndexOnFailure=";
+    private boolean disableIndexOnFailure;
+
   public IndexWriteException() {
     super();
   }
 
+    /**
+     * Used for the case where we cannot reach the index, but are not sure of the table or the
+     * mutations that caused the failure
+     * @param message
+     * @param cause
+     */
   public IndexWriteException(String message, Throwable cause) {
-    super(message, cause);
+      super(message, cause);
+  }
+
+  public IndexWriteException(String message, Throwable cause, boolean disableIndexOnFailure) {
+    super(prependDisableIndexMsg(message, disableIndexOnFailure), cause);
+    this.disableIndexOnFailure = disableIndexOnFailure;
   }
 
-  public IndexWriteException(String message) {
-    super(message);
+  public IndexWriteException(String message, boolean disableIndexOnFailure) {
+    super(prependDisableIndexMsg(message, disableIndexOnFailure));
+    this.disableIndexOnFailure = disableIndexOnFailure;
   }
 
-  public IndexWriteException(Throwable cause) {
+  private static String prependDisableIndexMsg(String message, boolean disableIndexOnFailure) {
+    return DISABLE_INDEX_ON_FAILURE_MSG + disableIndexOnFailure + " " + message;
+  }
+
+  public IndexWriteException(Throwable cause) {
     super(cause);
   }
+
+    public static boolean parseDisableIndexOnFailure(String message) {
+        Pattern p =
+                Pattern.compile(DISABLE_INDEX_ON_FAILURE_MSG + "(true|false)",
+                    Pattern.CASE_INSENSITIVE);
+        Matcher m = p.matcher(message);
+        if (m.find()) {
+            boolean disableIndexOnFailure = Boolean.parseBoolean(m.group(1));
+            return disableIndexOnFailure;
+        }
+        return QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX;
+    }
+
+    public boolean isDisableIndexOnFailure() {
+        return disableIndexOnFailure;
+    }
 }
\ No newline at end of file

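Because the flag is carried inside the exception message, only the server needs the config; the client recovers it by regex. A round-trip sketch of that encoding (DisableFlagRoundTrip is illustrative; the prefix matches DISABLE_INDEX_ON_FAILURE_MSG above):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class DisableFlagRoundTrip {
        static final String PREFIX = "disableIndexOnFailure=";

        static String encode(String message, boolean disableIndexOnFailure) {
            return PREFIX + disableIndexOnFailure + " " + message;
        }

        // note: returns false when the marker is absent; the real parser falls
        // back to the configured default instead
        static boolean decode(String message) {
            Matcher m = Pattern.compile(PREFIX + "(true|false)", Pattern.CASE_INSENSITIVE)
                    .matcher(message);
            return m.find() && Boolean.parseBoolean(m.group(1));
        }

        public static void main(String[] args) {
            String wire = encode("Failed to make index update: ...", true);
            System.out.println(decode(wire)); // true
        }
    }
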
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
index 546b43d..d593791 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java
@@ -18,8 +18,14 @@
 package org.apache.phoenix.hbase.index.exception;
 
 import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+
+import com.google.common.collect.Lists;
 
 /**
  * Indicate a failure to write to multiple index tables.
@@ -27,15 +33,34 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 @SuppressWarnings("serial")
 public class MultiIndexWriteFailureException extends IndexWriteException {
 
+  public static final String FAILURE_MSG = "Failed to write to multiple index tables: ";
   private List<HTableInterfaceReference> failures;
 
   /**
    * @param failures the tables to which the index write did not succeed
    */
-  public MultiIndexWriteFailureException(List<HTableInterfaceReference> failures) {
-    super("Failed to write to multiple index tables");
+  public MultiIndexWriteFailureException(List<HTableInterfaceReference> failures, boolean disableIndexOnFailure) {
+    super(FAILURE_MSG + failures, disableIndexOnFailure);
     this.failures = failures;
+  }
 
+  /**
+   * This constructor is used to rematerialize this exception when receiving
+   * an RPC exception from the server
+   * @param message detail message
+   */
+  public MultiIndexWriteFailureException(String message) {
+      super(message, IndexWriteException.parseDisableIndexOnFailure(message));
+      Pattern p = Pattern.compile(FAILURE_MSG + "\\[(.*)\\]");
+      Matcher m = p.matcher(message);
+      if (m.find()) {
+          failures = Lists.newArrayList();
+          String tablesStr = m.group(1);
+          for (String tableName : tablesStr.split(",\\s")) {
+            HTableInterfaceReference tableRef = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes(tableName)));
+            failures.add(tableRef);
+          }
+      }
   }
 
   public List<HTableInterfaceReference> getFailedTables() {

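The rematerializing constructor leans on List.toString(): the throwing side embeds the failed table list as "[a, b]" in the message, and the receiving side parses it back out. A sketch of that parse against a representative message (Pattern.quote is added here for safety; the original relies on FAILURE_MSG containing no regex metacharacters):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class FailedTablesParseSketch {
        static final String FAILURE_MSG = "Failed to write to multiple index tables: ";

        static List<String> parse(String message) {
            Matcher m = Pattern.compile(Pattern.quote(FAILURE_MSG) + "\\[(.*)\\]").matcher(message);
            if (!m.find()) {
                return Collections.emptyList();
            }
            // List.toString() separates entries with ", ", hence the ",\\s" split
            return Arrays.asList(m.group(1).split(",\\s"));
        }

        public static void main(String[] args) {
            String msg = "disableIndexOnFailure=true " + FAILURE_MSG
                    + "[MY_SCHEMA.IDX_A, MY_SCHEMA.IDX_B]";
            System.out.println(parse(msg)); // [MY_SCHEMA.IDX_A, MY_SCHEMA.IDX_B]
        }
    }
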
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
index eb3b521..610a82a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.hbase.index.exception;
 
 import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.client.Mutation;
 
@@ -27,6 +29,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 @SuppressWarnings("serial")
 public class SingleIndexWriteFailureException extends IndexWriteException {
 
+  public static final String FAILED_MSG = "Failed to make index update:";
   private String table;
 
   /**
@@ -45,13 +48,27 @@ public class SingleIndexWriteFailureException extends IndexWriteException {
    * @param cause underlying reason for the failure
    */
   public SingleIndexWriteFailureException(String targetTableName, List<Mutation> mutations,
-      Exception cause) {
-    super("Failed to make index update:\n\t table: " + targetTableName + "\n\t edits: " + mutations
-        + "\n\tcause: " + cause == null ? "UNKNOWN" : cause.getMessage(), cause);
+      Exception cause, boolean disableIndexOnFailure) {
+    super(FAILED_MSG + "\n\t table: " + targetTableName + "\n\t edits: " + mutations
+        + "\n\tcause: " + (cause == null ? "UNKNOWN" : cause.getMessage()), cause, disableIndexOnFailure);
     this.table = targetTableName;
   }
 
   /**
+   * This constructor is used to rematerialize this exception when receiving
+   * an RPC exception from the server
+   * @param message detail message
+   */
+  public SingleIndexWriteFailureException(String msg) {
+      super(msg, IndexWriteException.parseDisableIndexOnFailure(msg));
+      Pattern pattern = Pattern.compile(FAILED_MSG + ".* table: ([\\S]*)\\s.*", Pattern.DOTALL);
+      Matcher m = pattern.matcher(msg);
+      if (m.find()) {
+          this.table = m.group(1);
+      }
+  }
+
+  /**
    * @return The table to which we failed to write the index updates. If unknown, returns
    *         <tt>null</tt>
    */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 3649069..29b9faf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -70,13 +70,13 @@ public class IndexWriterUtils {
    public static final String HTABLE_KEEP_ALIVE_KEY = "hbase.htable.threads.keepalivetime";
 
    public static final String INDEX_WRITER_RPC_RETRIES_NUMBER = "phoenix.index.writes.rpc.retries.number";
-   /**
-    * Based on the logic in HBase's AsyncProcess, a default of 11 retries with a pause of 100ms
-    * approximates 48 sec total retry time (factoring in backoffs).  The total time should be less
-    * than HBase's rpc timeout (default of 60 sec) or else the client will retry before receiving
-    * the response
-    */
-   public static final int DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER = 11;
+   /**
+    * Retry the server-to-server index write RPC only once, and let the client retry the data
+    * write instead, to avoid tying up the handler.
+    */
+   // note in HBase 2+, numTries = numRetries + 1
+   // in prior versions, numTries = numRetries
+   public static final int DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER = 1;
    public static final String INDEX_WRITER_RPC_PAUSE = "phoenix.index.writes.rpc.pause";
    public static final int DEFAULT_INDEX_WRITER_RPC_PAUSE = 100;
 

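The 48-second figure in the removed comment is recoverable from HBase's backoff table: getPauseTime multiplies the base pause by RETRY_BACKOFF[tries] (jitter aside). A worked check of that arithmetic, assuming the HBase 1.x multiplier table (worth re-verifying against the HBase version in use):

    // Backoff arithmetic behind the old 11-retry / ~48 sec comment; the
    // multiplier table is assumed from HBase 1.x HConstants.RETRY_BACKOFF.
    class BackoffMathSketch {
        static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

        static long totalPauseMs(long pauseMs, int retries) {
            long total = 0;
            for (int i = 0; i < retries; i++) {
                total += pauseMs * RETRY_BACKOFF[Math.min(i, RETRY_BACKOFF.length - 1)];
            }
            return total;
        }

        public static void main(String[] args) {
            System.out.println(totalPauseMs(100, 11)); // 48100 ms, the ~48 sec cited above
            System.out.println(totalPauseMs(100, 1));  // 100 ms under the new default of 1
        }
    }
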
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index e4e8343..0bb8784 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -35,6 +35,7 @@ import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
 
 import com.google.common.collect.Multimap;
@@ -167,11 +168,11 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                     } catch (SingleIndexWriteFailureException e) {
                         throw e;
                     } catch (IOException e) {
-                        throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
+                        throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
                     } catch (InterruptedException e) {
                         // reset the interrupt status on the thread
                         Thread.currentThread().interrupt();
-                        throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
+                        throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
                     }
                     finally{
                         if (table != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 0449e9e..94d4f0f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.hbase.index.parallel.WaitForCompletionTaskRunner;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.index.PhoenixIndexFailurePolicy;
 import org.apache.phoenix.util.IndexUtil;
 
 import com.google.common.collect.Multimap;
@@ -110,6 +111,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
         this.factory = factory;
         this.abortable = new CapturingAbortable(abortable);
         this.stopped = stop;
+        this.env = env;
     }
 
     @Override
@@ -226,7 +228,8 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
         // if any of the tasks failed, then we need to propagate the failure
         if (failures.size() > 0) {
             // make the list unmodifiable to avoid any more synchronization concerns
-            throw new MultiIndexWriteFailureException(Collections.unmodifiableList(failures));
+            throw new MultiIndexWriteFailureException(Collections.unmodifiableList(failures),
+                    PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env));
         }
         return;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index ba6371b..14f8307 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -30,21 +30,28 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.hbase.index.exception.IndexWriteException;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
+import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.write.DelegateIndexFailurePolicy;
 import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
@@ -59,6 +66,7 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 
@@ -102,14 +110,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 rebuildIndexOnFailure = Boolean.parseBoolean(value);
             }
         }
-        String value = htd.getValue(DISABLE_INDEX_ON_WRITE_FAILURE);
-        if (value == null) {
-            disableIndexOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX, 
-                QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX);
-        } else {
-            disableIndexOnFailure = Boolean.parseBoolean(value);
-        }
-        value = htd.getValue(BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE);
+        disableIndexOnFailure = getDisableIndexOnFailure(env);
+        String value = htd.getValue(BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE);
         if (value == null) {
             blockDataTableWritesOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, 
                 QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
@@ -149,7 +151,11 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             throwing = false;
         } finally {
             if (!throwing) {
-            	IOException ioException = ServerUtil.wrapInDoNotRetryIOException("Unable to update the following indexes: " + attempted.keySet(), cause, timestamp);
+                SQLException sqlException =
+                        new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_WRITE_FAILURE)
+                                .setRootCause(cause).setMessage(cause.getLocalizedMessage()).build()
+                                .buildException();
+                IOException ioException = ServerUtil.wrapInDoNotRetryIOException(null, sqlException, timestamp);
             	Mutation m = attempted.entries().iterator().next().getValue();
             	boolean isIndexRebuild = PhoenixIndexMetaData.isIndexRebuild(m.getAttributesMap());
             	// Always throw if rebuilding index since the rebuilder needs to know if it was successful
@@ -212,7 +218,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             return timestamp;
         }
 
-        final PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
+        final PIndexState newState = disableIndexOnFailure ? PIndexState.PENDING_DISABLE : PIndexState.PENDING_ACTIVE;
         final long fTimestamp=timestamp;
         // for all the index tables that we've found, try to disable them and if that fails, try to
         return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
@@ -254,12 +260,9 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                                 throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                             }
                         }
-                        if (leaveIndexActive)
-                            LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
-                                    + " due to an exception while writing updates.", cause);
-                        else
-                            LOG.info("Successfully disabled index " + indexTableName
-                                    + " due to an exception while writing updates.", cause);
+                        LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                                + " due to an exception while writing updates. indexState=" + newState,
+                            cause);
                     } catch (Throwable t) {
                         if (t instanceof Exception) {
                             throw (Exception)t;
@@ -331,4 +334,158 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
         }
         return indexTableNames;
     }
+
+    /**
+     * Check config for whether to disable index on index write failures
+     * @param env the coprocessor environment, used to read the table descriptor and the config
+     * @return the value of the {@link PhoenixIndexFailurePolicy#DISABLE_INDEX_ON_WRITE_FAILURE}
+     *         table property, falling back to the cluster-wide config default
+     */
+    public static boolean getDisableIndexOnFailure(RegionCoprocessorEnvironment env) {
+        HTableDescriptor htd = env.getRegion().getTableDesc();
+        Configuration config = env.getConfiguration();
+        String value = htd.getValue(PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE);
+        boolean disableIndexOnFailure;
+        if (value == null) {
+            disableIndexOnFailure =
+                    config.getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX,
+                        QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX);
+        } else {
+            disableIndexOnFailure = Boolean.parseBoolean(value);
+        }
+        return disableIndexOnFailure;
+    }
+
+    /**
+     * If we're leaving the index active after index write failures on the server side, then we get
+     * the exception on the client side here after hitting the max # of hbase client retries. We
+     * disable the index as it may now be inconsistent. The indexDisableTimestamp was already set
+     * on the server side, so the rebuilder will be run.
+     */
+    private static void handleIndexWriteFailureFromClient(IndexWriteException indexWriteException,
+            PhoenixConnection conn) {
+        handleExceptionFromClient(indexWriteException, conn, PIndexState.DISABLE);
+    }
+
+    private static void handleIndexWriteSuccessFromClient(IndexWriteException indexWriteException,
+            PhoenixConnection conn) {
+        handleExceptionFromClient(indexWriteException, conn, PIndexState.ACTIVE);
+    }
+
+    private static void handleExceptionFromClient(IndexWriteException indexWriteException,
+            PhoenixConnection conn, PIndexState indexState) {
+        try {
+            Set<String> indexesToUpdate = new HashSet<>();
+            if (indexWriteException instanceof MultiIndexWriteFailureException) {
+                MultiIndexWriteFailureException indexException =
+                        (MultiIndexWriteFailureException) indexWriteException;
+                List<HTableInterfaceReference> failedIndexes = indexException.getFailedTables();
+                if (indexException.isDisableIndexOnFailure() && failedIndexes != null) {
+                    for (HTableInterfaceReference failedIndex : failedIndexes) {
+                        String failedIndexTable = failedIndex.getTableName();
+                        if (!indexesToUpdate.contains(failedIndexTable)) {
+                            updateIndex(failedIndexTable, conn, indexState);
+                            indexesToUpdate.add(failedIndexTable);
+                        }
+                    }
+                }
+            } else if (indexWriteException instanceof SingleIndexWriteFailureException) {
+                SingleIndexWriteFailureException indexException =
+                        (SingleIndexWriteFailureException) indexWriteException;
+                String failedIndex = indexException.getTableName();
+                if (indexException.isDisableIndexOnFailure() && failedIndex != null) {
+                    updateIndex(failedIndex, conn, indexState);
+                }
+            }
+        } catch (Exception handleE) {
+            LOG.warn("Error while trying to handle index write exception: " + indexWriteException, handleE);
+        }
+    }
+
+    public static interface MutateCommand {
+        void doMutation() throws IOException;
+    }
+
+    /**
+     * Retries a mutationBatch where the index write failed.
+     * One attempt should have already been made before calling this.
+     * Max retries and exponential backoff logic mimics that of HBase's client.
+     * If max retries are hit, the index is disabled.
+     * If the write is successful on a subsequent retry, the index is set back to ACTIVE.
+     * @param mutateCommand mutation command to execute
+     * @param iwe original IndexWriteException
+     * @param connection connection to use
+     * @param config config used to get retry settings
+     * @throws IOException if retries are exhausted or a non-index-write failure occurs
+     */
+    public static void doBatchWithRetries(MutateCommand mutateCommand,
+            IndexWriteException iwe, PhoenixConnection connection, ReadOnlyProps config)
+            throws IOException {
+        int maxTries = config.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+        long pause = config.getLong(HConstants.HBASE_CLIENT_PAUSE,
+            HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+        int numRetry = 1; // already tried once
+        // calculate max time to retry for
+        int timeout = 0;
+        for (int i = 0; i < maxTries; ++i) {
+          timeout = (int) (timeout + ConnectionUtils.getPauseTime(pause, i));
+        }
+        long canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout;
+        while (canRetryMore(numRetry++, maxTries, canRetryUntil)) {
+            try {
+                Thread.sleep(ConnectionUtils.getPauseTime(pause, numRetry)); // HBase's exponential backoff
+                mutateCommand.doMutation();
+                // success - change the index state from PENDING_DISABLE back to ACTIVE
+                handleIndexWriteSuccessFromClient(iwe, connection);
+                return;
+            } catch (IOException e) {
+                SQLException inferredE = ServerUtil.parseLocalOrRemoteServerException(e);
+                if (inferredE == null || inferredE.getErrorCode() != SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) {
+                    // if it's not an index write exception, throw exception, to be handled normally in caller's try-catch
+                    throw e;
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new IOException(e);
+            }
+        }
+        // max retries hit - disable the index
+        handleIndexWriteFailureFromClient(iwe, connection);
+        throw new DoNotRetryIOException(iwe); // send failure back to client
+    }
+
+    private static boolean canRetryMore(int numRetry, int maxRetries, long canRetryUntil) {
+        // If there is a single try we must not take into account the time.
+        return numRetry < maxRetries
+                || (maxRetries > 1 && EnvironmentEdgeManager.currentTime() < canRetryUntil);
+    }
+
+    /**
+     * Converts from SQLException to IndexWriteException
+     * @param sqlE the SQLException
+     * @return the IndexWriteException
+     */
+    public static IndexWriteException getIndexWriteException(SQLException sqlE) {
+        String sqlMsg = sqlE.getMessage();
+        if (sqlMsg.contains(MultiIndexWriteFailureException.FAILURE_MSG)) {
+            return new MultiIndexWriteFailureException(sqlMsg);
+        } else if (sqlMsg.contains(SingleIndexWriteFailureException.FAILED_MSG)) {
+            return new SingleIndexWriteFailureException(sqlMsg);
+        }
+        return null;
+    }
+
+    private static void updateIndex(String indexFullName, PhoenixConnection conn,
+            PIndexState indexState) throws SQLException {
+        if (PIndexState.DISABLE.equals(indexState)) {
+            LOG.info("Disabling index after hitting max number of index write retries: "
+                    + indexFullName);
+        } else if (PIndexState.ACTIVE.equals(indexState)) {
+            LOG.debug("Resetting index to active after subsequent success " + indexFullName);
+        }
+        IndexUtil.updateIndexState(conn, indexFullName, indexState, null);
+    }
 }
\ No newline at end of file

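doBatchWithRetries above precomputes a wall-clock budget from the backoff schedule and keeps retrying while either the try count or the deadline allows it. A small usage sketch of that canRetryMore contract (the deadline value is illustrative):

    // Sketch of the retry-budget predicate above; 'now' is passed in for clarity.
    class RetryBudgetSketch {
        static boolean canRetryMore(int numRetry, int maxTries, long canRetryUntil, long now) {
            // with a single try, time must not be taken into account
            return numRetry < maxTries || (maxTries > 1 && now < canRetryUntil);
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            long deadline = now + 48100; // e.g. 11 tries at pause=100ms, see IndexWriterUtils note
            System.out.println(canRetryMore(1, 11, deadline, now));           // true: tries remain
            System.out.println(canRetryMore(11, 11, deadline, now));          // true: deadline not hit
            System.out.println(canRetryMore(11, 11, deadline, deadline + 1)); // false: budget exhausted
        }
    }
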
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 23330d8..094f743 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -322,6 +322,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
     public static final int MIN_RENEW_LEASE_VERSION = VersionUtil.encodeVersion("1", "1", "3");
     public static final int MIN_NAMESPACE_MAPPED_PHOENIX_VERSION = VersionUtil.encodeVersion("4", "8", "0");
     public static final int MIN_PENDING_ACTIVE_INDEX = VersionUtil.encodeVersion("4", "12", "0");
+    public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0");
     
     // Version below which we should turn off essential column family.
     public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 5cc415d..8481bc5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -57,7 +57,9 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
 
 import com.google.common.collect.Lists;
@@ -68,11 +70,14 @@ public class QueryOptimizer {
     private final QueryServices services;
     private final boolean useIndexes;
     private final boolean costBased;
+    private long indexPendingDisabledThreshold;
 
     public QueryOptimizer(QueryServices services) {
         this.services = services;
         this.useIndexes = this.services.getProps().getBoolean(QueryServices.USE_INDEXES_ATTRIB, QueryServicesOptions.DEFAULT_USE_INDEXES);
         this.costBased = this.services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED);
+        this.indexPendingDisabledThreshold = this.services.getProps().getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
+            QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
     }
 
     public QueryPlan optimize(PhoenixStatement statement, QueryPlan dataPlan) throws SQLException {
@@ -158,7 +163,7 @@ public class QueryOptimizer {
         return hintedPlan == null ? orderPlansBestToWorst(select, plans, stopAtBestPlan) : plans;
     }
     
-    private static QueryPlan getHintedQueryPlan(PhoenixStatement statement, SelectStatement select, List<PTable> indexes, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, List<QueryPlan> plans) throws SQLException {
+    private QueryPlan getHintedQueryPlan(PhoenixStatement statement, SelectStatement select, List<PTable> indexes, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, List<QueryPlan> plans) throws SQLException {
         QueryPlan dataPlan = plans.get(0);
         String indexHint = select.getHint().getHint(Hint.INDEX);
         if (indexHint == null) {
@@ -215,7 +220,7 @@ public class QueryOptimizer {
         return -1;
     }
     
-    private static QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, boolean isHinted) throws SQLException {
+    private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, boolean isHinted) throws SQLException {
         int nColumns = dataPlan.getProjector().getColumnCount();
         String tableAlias = dataPlan.getTableRef().getTableAlias();
 		String alias = tableAlias==null ? null : '"' + tableAlias + '"'; // double quote in case it's case sensitive
@@ -229,8 +234,11 @@ public class QueryOptimizer {
         // We will or will not do tuple projection according to the data plan.
         boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
         // Check index state of now potentially updated index table to make sure it's active
-        PIndexState indexState = resolver.getTables().get(0).getTable().getIndexState();
-        if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE) {
+        TableRef indexTableRef = resolver.getTables().get(0);
+        PTable indexTable = indexTableRef.getTable();
+        PIndexState indexState = indexTable.getIndexState();
+        if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE
+                || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
             try {
             	// translate nodes that match expressions that are indexed to the associated column parse node
                 indexSelect = ParseNodeRewriter.rewrite(indexSelect, new  IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
@@ -246,10 +254,13 @@ public class QueryOptimizer {
                         && !plan.getContext().getDataColumns().isEmpty()) {
                     return null;
                 }
-                indexState = plan.getTableRef().getTable().getIndexState();
+                indexTableRef = plan.getTableRef();
+                indexTable = indexTableRef.getTable();
+                indexState = indexTable.getIndexState();
                 // Checking number of columns handles the wildcard cases correctly, as in that case the index
                 // must contain all columns from the data table to be able to be used.
-                if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE) {
+                if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE
+                        || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
                     if (plan.getProjector().getColumnCount() == nColumns) {
                         return plan;
                     } else if (index.getIndexType() == IndexType.GLOBAL) {
@@ -312,6 +323,12 @@ public class QueryOptimizer {
         return null;
     }
 
+    // Returns true if we can still use the index.
+    // Returns false if the index has been in PENDING_DISABLE too long and should be considered disabled.
+    private boolean isUnderPendingDisableThreshold(long currentTimestamp, long indexDisableTimestamp) {
+        return currentTimestamp - indexDisableTimestamp <= indexPendingDisabledThreshold;
+    }
+
     /**
      * Order the plans among all the possible ones from best to worst.
      * If option COST_BASED_OPTIMIZER_ENABLED is on and stats are available, we order the plans based on

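For illustration, a minimal standalone sketch of the PENDING_DISABLE gating that QueryOptimizer now performs; the state strings, threshold, and timestamps below are illustrative stand-ins, not Phoenix API calls:

public class PendingDisableCheck {
    // Mirrors DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD (30 secs)
    static final long PENDING_DISABLE_THRESHOLD_MS = 30000;

    static boolean isIndexUsable(String indexState, long currentTs, long indexDisableTs) {
        if (indexState.equals("ACTIVE") || indexState.equals("PENDING_ACTIVE")) {
            return true;
        }
        // A PENDING_DISABLE index stays queryable until the threshold elapses
        return indexState.equals("PENDING_DISABLE")
                && currentTs - indexDisableTs <= PENDING_DISABLE_THRESHOLD_MS;
    }

    public static void main(String[] args) {
        long failedAt = System.currentTimeMillis() - 10000; // write failed 10s ago
        System.out.println(isIndexUsable("PENDING_DISABLE",
                System.currentTimeMillis(), failedAt)); // true
    }
}
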
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 0b80f4d..2a31f09 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -144,6 +144,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE = "phoenix.index.rebuild.batch.perTable";
     // If index disable timestamp is older than this threshold, then index rebuild task won't attempt to rebuild it
     public static final String INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = "phoenix.index.rebuild.disabletimestamp.threshold";
+    // Threshold (in ms) that an index may remain in PENDING_DISABLE before we consider it disabled
+    public static final String INDEX_PENDING_DISABLE_THRESHOLD = "phoenix.index.pending.disable.threshold";
 
     // Block writes to data table when index write fails
     public static final String INDEX_FAILURE_BLOCK_WRITE = "phoenix.index.failure.block.write";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 4d31974..d749433 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -198,6 +198,7 @@ public class QueryServicesOptions {
     public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 30000 * 60; // 30 mins
     public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 5 total tries at rpc level
     public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = 30000 * 60; // 30 mins
+    public static final long DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD = 30000; // 30 secs
 
     /**
      * HConstants#HIGH_QOS is the max we will see to a standard table. We go higher to differentiate

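The new phoenix.index.pending.disable.threshold property can be tuned like any other client-side Phoenix property; a hedged sketch, assuming the usual JDBC property plumbing (the URL and value are examples, not defaults):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ThresholdOverrideExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Let indexes sit in PENDING_DISABLE for 60s instead of the 30s default
        props.setProperty("phoenix.index.pending.disable.threshold", "60000");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            // Queries on this connection keep using an index for up to 60s
            // after a write failure puts it in PENDING_DISABLE.
        }
    }
}
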
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/schema/PIndexState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PIndexState.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PIndexState.java
index d7dbeca..2b6ac4a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PIndexState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PIndexState.java
@@ -27,7 +27,12 @@ public enum PIndexState {
     INACTIVE("i"),
     DISABLE("x"),
     REBUILD("r"),
-    PENDING_ACTIVE("p");
+    PENDING_ACTIVE("p"),
+    // Used when disabling an index on write failure (PHOENIX-4130).
+    // When an index write fails, the index is put in this state and the client retries the mutation.
+    // After retries are exhausted, the client should mark the index as disabled; if that
+    // doesn't happen, the index is considered disabled once it has been in this state too long.
+    PENDING_DISABLE("w");
 
     private final String serializedValue;
     private final byte[] serializedBytes;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b539cd62/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
index df6a349..4d8565f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -132,6 +133,17 @@ public class KeyValueUtil {
         return kvs[pos];
     }
 
+    public static void setTimestamp(Mutation m, long timestamp) {
+        byte[] tsBytes = Bytes.toBytes(timestamp);
+        for (List<Cell> family : m.getFamilyCellMap().values()) {
+            List<KeyValue> familyKVs = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValues(family);
+            for (KeyValue kv : familyKVs) {
+                int tsOffset = kv.getTimestampOffset();
+                System.arraycopy(tsBytes, 0, kv.getBuffer(), tsOffset, Bytes.SIZEOF_LONG);
+            }
+        }
+    }
+
     /*
      * Special comparator, *only* works for binary search.
      *

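The setTimestamp helper above rewrites the serialized 8-byte timestamp in place inside each KeyValue's backing buffer rather than rebuilding the cells. A standalone sketch of the same in-place rewrite on a single KeyValue (row/family/qualifier values are illustrative):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampRewriteDemo {
    public static void main(String[] args) {
        KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
                Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
        long newTs = 42L;
        // Overwrite the serialized timestamp without reallocating the cell
        System.arraycopy(Bytes.toBytes(newTs), 0, kv.getBuffer(),
                kv.getTimestampOffset(), Bytes.SIZEOF_LONG);
        System.out.println(kv.getTimestamp()); // prints 42
    }
}
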

[6/7] phoenix git commit: PHOENIX-4549 Pherf - Column override and sequenced index creation support

Posted by pb...@apache.org.
PHOENIX-4549 Pherf - Column override and sequenced index creation support


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/04029fb2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/04029fb2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/04029fb2

Branch: refs/heads/4.x-cdh5.11.2
Commit: 04029fb28bb4a6d9aa30826fccdf9731805e6b43
Parents: 92bd6d6
Author: Mujtaba <mu...@apache.org>
Authored: Wed Feb 7 19:49:15 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:56:17 2018 +0000

----------------------------------------------------------------------
 .../phoenix/pherf/configuration/Column.java     |  23 +--
 .../pherf/configuration/DataTypeMapping.java    |   6 +-
 .../phoenix/pherf/configuration/Query.java      |  25 ++++
 .../phoenix/pherf/configuration/Scenario.java   |   3 +-
 .../phoenix/pherf/result/QueryResult.java       |  10 +-
 .../phoenix/pherf/result/ResultManager.java     |  18 ++-
 .../apache/phoenix/pherf/result/ResultUtil.java |   9 +-
 .../phoenix/pherf/rules/RulesApplier.java       | 142 +++++++++++++++----
 .../phoenix/pherf/schema/SchemaReader.java      |   2 +-
 .../apache/phoenix/pherf/util/PhoenixUtil.java  |  48 ++++++-
 .../pherf/workload/MultiThreadedRunner.java     |  35 ++++-
 .../phoenix/pherf/workload/QueryExecutor.java   |  36 ++---
 .../phoenix/pherf/workload/WriteWorkload.java   |  54 ++++++-
 .../scenario/prod_test_unsalted_scenario.xml    |  14 +-
 .../org/apache/phoenix/pherf/ColumnTest.java    |   3 +
 .../phoenix/pherf/ConfigurationParserTest.java  |   4 +-
 .../apache/phoenix/pherf/RuleGeneratorTest.java | 107 +++++++++++++-
 .../test/resources/datamodel/test_schema.sql    |   4 +
 .../test/resources/scenario/test_scenario.xml   |  48 ++++++-
 19 files changed, 488 insertions(+), 103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java
index 7c9e180..0d64a39 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java
@@ -28,7 +28,8 @@ public class Column {
 	private String name;
     private String prefix;
     private DataSequence dataSequence;
-    private int length, minValue, maxValue, precision;
+    private int length, precision;
+    private long minValue, maxValue;
     private int nullChance;
     private boolean userDefined;
     private List<DataValue> dataValues;
@@ -40,8 +41,8 @@ public class Column {
         // Initialize int to negative value so we can distinguish 0 in mutations
         // Object fields can be detected with null
         this.length = Integer.MIN_VALUE;
-        this.minValue = Integer.MIN_VALUE;
-        this.maxValue = Integer.MIN_VALUE;
+        this.minValue = Long.MIN_VALUE;
+        this.maxValue = Long.MIN_VALUE;
         this.precision = Integer.MIN_VALUE;
         this.nullChance = Integer.MIN_VALUE;
         this.userDefined = false;
@@ -84,6 +85,10 @@ public class Column {
 	public int getLength() {
 		return length;
 	}
+	
+	public int getLengthExcludingPrefix() {
+		return (this.getPrefix() == null) ? this.length : this.length - this.getPrefix().length();
+	}
 
 	public void setLength(int length) {
 		this.length = length;
@@ -97,19 +102,19 @@ public class Column {
 		this.type = type;
 	}
 
-    public int getMinValue() {
+    public long getMinValue() {
         return minValue;
     }
 
-    public void setMinValue(int minValue) {
+    public void setMinValue(long minValue) {
         this.minValue = minValue;
     }
 
-    public int getMaxValue() {
+    public long getMaxValue() {
         return maxValue;
     }
 
-    public void setMaxValue(int maxValue) {
+    public void setMaxValue(long maxValue) {
         this.maxValue = maxValue;
     }
 
@@ -134,11 +139,11 @@ public class Column {
      *               obj contains only the fields you want to mutate this object into.
      */
     public void mutate(Column column) {
-        if (column.getMinValue() != Integer.MIN_VALUE) {
+        if (column.getMinValue() != Long.MIN_VALUE) {
             setMinValue(column.getMinValue());
         }
 
-        if (column.getMaxValue() != Integer.MIN_VALUE) {
+        if (column.getMaxValue() != Long.MIN_VALUE) {
             setMaxValue(column.getMaxValue());
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index c266a57..0476df2 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -25,7 +25,11 @@ public enum DataTypeMapping {
     CHAR("CHAR", Types.CHAR),
     DECIMAL("DECIMAL", Types.DECIMAL),
     INTEGER("INTEGER", Types.INTEGER),
-    DATE("DATE", Types.DATE);
+    DATE("DATE", Types.DATE),
+    UNSIGNED_LONG("UNSIGNED_LONG", Types.LONGVARCHAR),
+    VARCHAR_ARRAY("VARCHAR ARRAY", Types.ARRAY),
+    VARBINARY("VARBINARY", Types.VARBINARY),
+    TIMESTAMP("TIMESTAMP", Types.TIMESTAMP);
 
     private final String sType;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java
index 1e5cabe..e283715 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java
@@ -18,9 +18,14 @@
 
 package org.apache.phoenix.pherf.configuration;
 
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlType;
 
+import org.apache.phoenix.pherf.rules.RulesApplier;
+
 @XmlType
 public class Query {
 
@@ -30,7 +35,12 @@ public class Query {
     private String ddl;
     private String queryGroup;
     private String id;
+    private Pattern pattern;
 
+    public Query() {
+    	pattern = Pattern.compile("\\[.*?\\]");
+    }
+    
     /**
      * SQL statement
      *
@@ -40,6 +50,21 @@ public class Query {
     public String getStatement() {
         return statement;
     }
+    
+    public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario) throws Exception {
+        String ret = this.statement;
+        Matcher m = pattern.matcher(ret);
+        while (m.find()) {
+            String dynamicField = m.group(0).replace("[", "").replace("]", "");
+            Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario);
+            String needQuotes = (dynamicColumn.getType() == DataTypeMapping.CHAR
+                    || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? "'" : "";
+            ret = ret.replace("[" + dynamicField + "]",
+                    needQuotes + ruleApplier.getDataValue(dynamicColumn).getValue() + needQuotes);
+        }
+        return ret;
+    }
 
     public void setStatement(String statement) {
         // normalize statement - merge all consecutive spaces into one

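getDynamicStatement above replaces each [FIELD] token in a query with a rule-generated value, quoting it when the column is CHAR or VARCHAR. A simplified standalone version of that substitution, with the rules lookup faked by maps (all names and values are illustrative):

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DynamicStatementDemo {
    private static final Pattern FIELD = Pattern.compile("\\[.*?\\]");

    static String substitute(String sql, Map<String, String> values,
            Map<String, Boolean> isTextType) {
        String ret = sql;
        Matcher m = FIELD.matcher(sql);
        while (m.find()) {
            String field = m.group(0).replace("[", "").replace("]", "");
            // Quote the generated value only for text-typed columns
            String quote = isTextType.getOrDefault(field, false) ? "'" : "";
            ret = ret.replace("[" + field + "]", quote + values.get(field) + quote);
        }
        return ret;
    }

    public static void main(String[] args) {
        String sql = "SELECT * FROM T WHERE HOST = [HOST_NAME] AND CORES > [CORE_COUNT]";
        System.out.println(substitute(sql,
                Map.of("HOST_NAME", "h1", "CORE_COUNT", "8"),
                Map.of("HOST_NAME", true, "CORE_COUNT", false)));
        // SELECT * FROM T WHERE HOST = 'h1' AND CORES > 8
    }
}
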
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
index 200fdc5..08266b7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
@@ -36,13 +36,12 @@ public class Scenario {
     private Map<String, String> phoenixProperties;
     private DataOverride dataOverride;
     private List<QuerySet> querySet = new ArrayList<>();
-    private WriteParams writeParams;
+    private WriteParams writeParams = null;
     private String name;
     private String tenantId;
     private String ddl;
 
     public Scenario() {
-        writeParams = new WriteParams();
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
index 669a472..cef24f4 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
@@ -19,7 +19,9 @@
 package org.apache.phoenix.pherf.result;
 
 import org.apache.phoenix.pherf.configuration.Query;
+import org.apache.phoenix.pherf.configuration.Scenario;
 import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 import org.apache.phoenix.util.DateUtil;
 
@@ -99,7 +101,7 @@ public class QueryResult extends Query {
         return totalRunTime / getThreadTimes().size();
     }
 
-    public List<ResultValue> getCsvRepresentation(ResultUtil util) {
+    public List<ResultValue> getCsvRepresentation(ResultUtil util, Scenario scenario, RulesApplier ruleApplier) {
         List<ResultValue> rowValues = new ArrayList<>();
         rowValues.add(new ResultValue(util.convertNull(getStartTimeText())));
         rowValues.add(new ResultValue(util.convertNull(this.getQueryGroup())));
@@ -109,14 +111,14 @@ public class QueryResult extends Query {
         rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgRunTimeInMs()))));
         rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgMinRunTimeInMs()))));
         rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRunCount()))));
-        rowValues.add(new ResultValue(util.convertNull(String.valueOf(getExplainPlan()))));
+        rowValues.add(new ResultValue(util.convertNull(String.valueOf(getExplainPlan(scenario, ruleApplier)))));
         rowValues.add(new ResultValue(util.convertNull(String.valueOf(getResultRowCount()))));
         return rowValues;
     }
     
-    private String getExplainPlan() {
+    private String getExplainPlan(Scenario scenario, RulesApplier ruleApplier) {
     	try {
-			return pUtil.getExplainPlan(this);
+			return pUtil.getExplainPlan(this, scenario, ruleApplier);
 		} catch (SQLException e) {
 			e.printStackTrace();
 		}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 5e0f242..929f96a 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -22,6 +22,7 @@ import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.result.file.ResultFileDetails;
 import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler;
 import org.apache.phoenix.pherf.result.impl.XMLResultHandler;
+import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.util.InstanceResolver;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -81,18 +82,23 @@ public class ResultManager {
         }
     }
 
+    
+    public synchronized void write(DataModelResult result) throws Exception {
+    	write(result, null);
+    }
+    
     /**
      * Write out the result to each writer in the pool
      *
      * @param result {@link DataModelResult}
      * @throws Exception
      */
-    public synchronized void write(DataModelResult result) throws Exception {
+    public synchronized void write(DataModelResult result, RulesApplier ruleApplier) throws Exception {
         try {
             util.ensureBaseResultDirExists();
             final DataModelResult dataModelResultCopy = new DataModelResult(result);
             for (ResultHandler handler : resultHandlers) {
-                util.write(handler, dataModelResultCopy);
+                util.write(handler, dataModelResultCopy, ruleApplier);
             }
         } finally {
             for (ResultHandler handler : resultHandlers) {
@@ -108,13 +114,17 @@ public class ResultManager {
         }
     }
 
+    public synchronized void write(List<DataModelResult> dataModelResults) throws Exception {
+    	write(dataModelResults, null);
+    }
+    
     /**
      * Write a combined set of results for each result in the list.
      *
      * @param dataModelResults List<{@link DataModelResult > </>}
      * @throws Exception
      */
-    public synchronized void write(List<DataModelResult> dataModelResults) throws Exception {
+    public synchronized void write(List<DataModelResult> dataModelResults, RulesApplier rulesApplier) throws Exception {
         util.ensureBaseResultDirExists();
 
         CSVFileResultHandler detailsCSVWriter = null;
@@ -123,7 +133,7 @@ public class ResultManager {
             detailsCSVWriter.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE);
             detailsCSVWriter.setResultFileName(PherfConstants.COMBINED_FILE_NAME);
             for (DataModelResult dataModelResult : dataModelResults) {
-                util.write(detailsCSVWriter, dataModelResult);
+                util.write(detailsCSVWriter, dataModelResult, rulesApplier);
             }
         } finally {
             if (detailsCSVWriter != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
index 0c2a7b8..30988ef 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
@@ -23,6 +23,7 @@ import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.result.file.ResultFileDetails;
 import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler;
 import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
+import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 
 import java.io.File;
@@ -117,7 +118,7 @@ public class ResultUtil {
         }
     }
 
-    public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult)
+    public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult, RulesApplier ruleApplier)
             throws Exception {
         ResultFileDetails resultFileDetails = resultHandler.getResultFileDetails();
         switch (resultFileDetails) {
@@ -126,7 +127,7 @@ public class ResultUtil {
         case CSV_DETAILED_FUNCTIONAL:
             List<List<ResultValue>>
                     rowDetails =
-                    getCSVResults(dataModelResult, resultFileDetails);
+                    getCSVResults(dataModelResult, resultFileDetails, ruleApplier);
             for (List<ResultValue> row : rowDetails) {
                 Result
                         result =
@@ -199,7 +200,7 @@ public class ResultUtil {
     }
 
     private List<List<ResultValue>> getCSVResults(DataModelResult dataModelResult,
-            ResultFileDetails resultFileDetails) {
+            ResultFileDetails resultFileDetails, RulesApplier ruleApplier) {
         List<List<ResultValue>> rowList = new ArrayList<>();
 
         for (ScenarioResult result : dataModelResult.getScenarioResult()) {
@@ -207,7 +208,7 @@ public class ResultUtil {
                 for (QueryResult queryResult : querySetResult.getQueryResults()) {
                     switch (resultFileDetails) {
                     case CSV_AGGREGATE_PERFORMANCE:
-                        List<ResultValue> csvResult = queryResult.getCsvRepresentation(this);
+                        List<ResultValue> csvResult = queryResult.getCsvRepresentation(this, result, ruleApplier);
                         rowList.add(csvResult);
                         break;
                     case CSV_DETAILED_PERFORMANCE:

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
index 454050b..2afc29a 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
@@ -39,7 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
 
 public class RulesApplier {
     private static final Logger logger = LoggerFactory.getLogger(RulesApplier.class);
-    private static final AtomicLong COUNTER = new AtomicLong(100);
+    private static final AtomicLong COUNTER = new AtomicLong(0);
 
     // Used to bail out of random distribution if it takes too long
     // This should never happen when distributions add up to 100
@@ -51,6 +51,9 @@ public class RulesApplier {
 
     private final XMLConfigParser parser;
     private final List<Map> modelList;
+    private final Map<String, Column> columnMap;
+    private String cachedScenarioOverrideName;
+    private Map<DataTypeMapping, List> scenarioOverrideMap;
 
 
     public RulesApplier(XMLConfigParser parser) {
@@ -60,15 +63,39 @@ public class RulesApplier {
     public RulesApplier(XMLConfigParser parser, long seed) {
         this.parser = parser;
         this.modelList = new ArrayList<Map>();
+        this.columnMap = new HashMap<String, Column>();
         this.rndNull = new Random(seed);
         this.rndVal = new Random(seed);
         this.randomDataGenerator = new RandomDataGenerator();
+        this.cachedScenarioOverrideName = null;
         populateModelList();
     }
 
     public List<Map> getModelList() {
         return Collections.unmodifiableList(this.modelList);
     }
+    
+    private Map<DataTypeMapping, List> getCachedScenarioOverrides(Scenario scenario) {
+        // Note: compare scenario names with equals(), not reference equality
+        if (this.cachedScenarioOverrideName == null
+                || !this.cachedScenarioOverrideName.equals(scenario.getName())) {
+            this.cachedScenarioOverrideName = scenario.getName();
+            this.scenarioOverrideMap = new HashMap<DataTypeMapping, List>();
+
+            if (scenario.getDataOverride() != null) {
+                for (Column column : scenario.getDataOverride().getColumn()) {
+                    DataTypeMapping type = column.getType();
+                    if (this.scenarioOverrideMap.containsKey(type)) {
+                        this.scenarioOverrideMap.get(type).add(column);
+                    } else {
+                        List<Column> cols = new LinkedList<Column>();
+                        cols.add(column);
+                        this.scenarioOverrideMap.put(type, cols);
+                    }
+                }
+            }
+        }
+        return scenarioOverrideMap;
+    }
 
 
     /**
@@ -84,11 +111,26 @@ public class RulesApplier {
      */
     public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception {
         // TODO Make a Set of Rules that have already been applied so that so we don't generate for every value
-
+    	
         List<Scenario> scenarios = parser.getScenarios();
         DataValue value = null;
         if (scenarios.contains(scenario)) {
             logger.debug("We found a correct Scenario");
+            
+            Map<DataTypeMapping, List> overrideRuleMap = this.getCachedScenarioOverrides(scenario);
+            
+            if (overrideRuleMap != null) {
+                List<Column> overrideRuleList = overrideRuleMap.get(phxMetaColumn.getType());
+                if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) {
+                    logger.debug("We found a correct override column rule");
+                    Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn);
+                    if (columnRule != null) {
+                        return getDataValue(columnRule);
+                    }
+                }
+            }
+            
             // Assume the first rule map
             Map<DataTypeMapping, List> ruleMap = modelList.get(0);
             List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
@@ -107,6 +149,7 @@ public class RulesApplier {
             }
 
         }
+        
         return value;
     }
 
@@ -140,19 +183,9 @@ public class RulesApplier {
 
         switch (column.getType()) {
             case VARCHAR:
-                // Use the specified data values from configs if they exist
-                if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
-                    data = pickDataValueFromList(dataValues);
-                } else {
-                    Preconditions.checkArgument(length > 0, "length needs to be > 0");
-                    if (column.getDataSequence() == DataSequence.SEQUENTIAL) {
-                        data = getSequentialDataValue(column);
-                    } else {
-                        data = getRandomDataValue(column);
-                    }
-                }
-                break;
+            case VARBINARY:
             case CHAR:
+                // Use the specified data values from configs if they exist
                 if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
                     data = pickDataValueFromList(dataValues);
                 } else {
@@ -164,6 +197,17 @@ public class RulesApplier {
                     }
                 }
                 break;
+            case VARCHAR_ARRAY:
+                // Only list data values are supported for arrays
+                String arr = "";
+                for (DataValue dv : dataValues) {
+                    arr += "," + dv.getValue();
+                }
+                if (arr.startsWith(",")) {
+                    arr = arr.replaceFirst(",", "");
+                }
+                data = new DataValue(column.getType(), arr);
+                break;
             case DECIMAL:
                 if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
                     data = pickDataValueFromList(dataValues);
@@ -171,8 +215,8 @@ public class RulesApplier {
                     int precision = column.getPrecision();
                     double minDbl = column.getMinValue();
                     Preconditions.checkArgument((precision > 0) && (precision <= 18), "Precision must be between 0 and 18");
-                    Preconditions.checkArgument(minDbl >= 0, "minvalue must be set in configuration");
-                    Preconditions.checkArgument(column.getMaxValue() > 0, "maxValue must be set in configuration");
+                    Preconditions.checkArgument(minDbl >= 0, "minValue must be set in configuration for decimal");
+                    Preconditions.checkArgument(column.getMaxValue() > 0, "maxValue must be set in configuration for decimal");
                     StringBuilder maxValueStr = new StringBuilder();
 
                     for (int i = 0; i < precision; i++) {
@@ -188,22 +232,34 @@ public class RulesApplier {
                 if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
                     data = pickDataValueFromList(dataValues);
                 } else {
-                    int minInt = column.getMinValue();
-                    int maxInt = column.getMaxValue();
-                    Preconditions.checkArgument((minInt > 0) && (maxInt > 0), "min and max values need to be set in configuration");
+                    int minInt = (int) column.getMinValue();
+                    int maxInt = (int) column.getMaxValue();
+                    Preconditions.checkArgument((minInt > 0) && (maxInt > 0), "min and max values need to be set in configuration for integers " + column.getName());
                     int intVal = RandomUtils.nextInt(minInt, maxInt);
                     data = new DataValue(column.getType(), String.valueOf(intVal));
                 }
                 break;
+            case UNSIGNED_LONG:
+                if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
+                    data = pickDataValueFromList(dataValues);
+                } else {
+                    long minLong = column.getMinValue();
+                    long maxLong = column.getMaxValue();
+                    Preconditions.checkArgument((minLong > 0) && (maxLong > 0), "min and max values need to be set in configuration for unsigned_longs " + column.getName());
+                    long longVal = RandomUtils.nextLong(minLong, maxLong);
+                    data = new DataValue(column.getType(), String.valueOf(longVal));
+                }
+                break;
             case DATE:
+            case TIMESTAMP:
                 if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) {
                     data = pickDataValueFromList(dataValues);
                     // Check if date has right format or not
                     data.setValue(checkDatePattern(data.getValue()));
                 } else if (column.getUseCurrentDate() != true){
-                    int minYear = column.getMinValue();
-                    int maxYear = column.getMaxValue();
-                    Preconditions.checkArgument((minYear > 0) && (maxYear > 0), "min and max values need to be set in configuration");
+                    int minYear = (int) column.getMinValue();
+                    int maxYear = (int) column.getMaxValue();
+                    Preconditions.checkArgument((minYear > 0) && (maxYear > 0), "min and max values need to be set in configuration for date/timestamps " + column.getName());
 
                     String dt = generateRandomDate(minYear, maxYear);
                     data = new DataValue(column.getType(), dt);
@@ -353,13 +409,15 @@ public class RulesApplier {
         if (!modelList.isEmpty()) {
             return;
         }
-
+        
         // Support for multiple models, but rules are only relevant each model
         for (DataModel model : parser.getDataModels()) {
 
             // Step 1
             final Map<DataTypeMapping, List> ruleMap = new HashMap<DataTypeMapping, List>();
             for (Column column : model.getDataMappingColumns()) {
+            	columnMap.put(column.getName(), column);
+            	
                 List<Column> cols;
                 DataTypeMapping type = column.getType();
                 if (ruleMap.containsKey(type)) {
@@ -382,7 +440,33 @@ public class RulesApplier {
         List<Column> ruleList = ruleMap.get(phxMetaColumn.getType());
         return getColumnForRule(ruleList, phxMetaColumn);
     }
+    
+    public Column getRule(String columnName) {
+        return getRule(columnName, null);
+    }
+
+    public Column getRule(String columnName, Scenario scenario) {
+        // Scenario-level data overrides take precedence over the model's column rules
+        if (null != scenario && null != scenario.getDataOverride()) {
+            for (Column column : scenario.getDataOverride().getColumn()) {
+                if (column.getName().equals(columnName)) {
+                    return column;
+                }
+            }
+        }
+        return columnMap.get(columnName);
+    }
 
+    private Column getColumnForRuleOverride(List<Column> ruleList, Column phxMetaColumn) {
+        for (Column columnRule : ruleList) {
+            if (columnRule.getName().equals(phxMetaColumn.getName())) {
+                return new Column(columnRule);
+            }
+        }
+
+        return null;
+    }
+    
     private Column getColumnForRule(List<Column> ruleList, Column phxMetaColumn) {
 
         // Column pointer to head of list
@@ -400,7 +484,7 @@ public class RulesApplier {
             ruleAppliedColumn.mutate(columnRule);
         }
 
-        return ruleAppliedColumn;
+       	return ruleAppliedColumn;
     }
 
     /**
@@ -414,10 +498,12 @@ public class RulesApplier {
         DataValue data = null;
         long inc = COUNTER.getAndIncrement();
         String strInc = String.valueOf(inc);
-        String varchar = RandomStringUtils.randomAlphanumeric(column.getLength() - strInc.length());
-        varchar = (column.getPrefix() != null) ? column.getPrefix() + strInc + varchar :
-                strInc + varchar;
-
+        int paddedLength = column.getLengthExcludingPrefix();
+        String strInc1 = StringUtils.leftPad(strInc, paddedLength, "0");
+        String strInc2 = StringUtils.right(strInc1, paddedLength);
+        String varchar = (column.getPrefix() != null) ? column.getPrefix() + strInc2 : strInc2;
+
         // Truncate string back down if it exceeds length
         varchar = StringUtils.left(varchar,column.getLength());
         data = new DataValue(column.getType(), varchar);

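The reworked getSequentialDataValue above left-pads the counter with zeros to the column length (minus any prefix) so that sequential values have a fixed width and sort lexicographically. The same padding with plain JDK formatting, under illustrative lengths and prefixes:

public class SequentialValueDemo {
    public static void main(String[] args) {
        String prefix = "xMdQ";
        int columnLength = 10;                        // length from the data model
        int paddedLength = columnLength - prefix.length();
        for (long counter = 0; counter < 3; counter++) {
            // %0Nd left-pads with zeros, like StringUtils.leftPad(strInc, N, "0")
            String value = prefix + String.format("%0" + paddedLength + "d", counter);
            System.out.println(value);                // xMdQ000000, xMdQ000001, xMdQ000002
        }
    }
}
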
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
index 439f87e..5ccdaaa 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
@@ -62,7 +62,7 @@ public class SchemaReader {
     public void applySchema() throws Exception {
         Connection connection = null;
         try {
-            connection = pUtil.getConnection();
+            connection = pUtil.getConnection(null);
             for (Path file : resourceList) {
                 logger.info("\nApplying schema to file: " + file);
                 pUtil.executeStatement(resourceToString(file), connection);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
index df18544..bc9a5da 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
@@ -21,6 +21,10 @@ package org.apache.phoenix.pherf.util;
 import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.configuration.*;
 import org.apache.phoenix.pherf.jmx.MonitorManager;
+import org.apache.phoenix.pherf.result.DataLoadThreadTime;
+import org.apache.phoenix.pherf.result.DataLoadTimeSummary;
+import org.apache.phoenix.pherf.rules.RulesApplier;
+import org.apache.phoenix.pherf.util.GoogleChartGenerator.Node;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -28,6 +32,7 @@ import java.sql.*;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Properties;
 
@@ -42,6 +47,9 @@ public class PhoenixUtil {
     private static PhoenixUtil instance;
     private static boolean useThinDriver;
     private static String queryServerUrl;
+    private static final String ASYNC_KEYWORD = "ASYNC";
+    private static final int ONE_MIN_IN_MS = 60000;
+    private static String CurrentSCN = null;
 
     private PhoenixUtil() {
         this(false);
@@ -78,10 +86,14 @@ public class PhoenixUtil {
     }
 
     public Connection getConnection(String tenantId) throws Exception {
-        return getConnection(tenantId, testEnabled);
+        return getConnection(tenantId, testEnabled, null);
     }
 
-    private Connection getConnection(String tenantId, boolean testEnabled) throws Exception {
+    public Connection getConnection(String tenantId, Map<String, String> phoenixProperty) throws Exception {
+        return getConnection(tenantId, testEnabled, phoenixProperty);
+    }
+
+    public Connection getConnection(String tenantId, boolean testEnabled, Map<String, String> phoenixProperty) throws Exception {
         if (useThinDriver) {
             if (null == queryServerUrl) {
                 throw new IllegalArgumentException("QueryServer URL must be set before" +
@@ -104,6 +116,16 @@ public class PhoenixUtil {
                 props.setProperty("TenantId", tenantId);
                 logger.debug("\nSetting tenantId to " + tenantId);
             }
+
+            if (phoenixProperty != null) {
+                for (Map.Entry<String, String> phxProperty : phoenixProperty.entrySet()) {
+                    props.setProperty(phxProperty.getKey(), phxProperty.getValue());
+                    logger.debug("Setting connection property "
+                            + phxProperty.getKey() + " to "
+                            + phxProperty.getValue());
+                }
+            }
+
             String url = "jdbc:phoenix:" + zookeeper + (testEnabled ? ";test=true" : "");
             return DriverManager.getConnection(url, props);
         }
@@ -224,7 +246,7 @@ public class PhoenixUtil {
     public ResultSet getColumnsMetaData(String schemaName, String tableName, Connection connection)
             throws SQLException {
         DatabaseMetaData dbmd = connection.getMetaData();
-        ResultSet resultSet = dbmd.getColumns(null, schemaName, tableName, null);
+        ResultSet resultSet = dbmd.getColumns(null, schemaName.toUpperCase(), tableName.toUpperCase(), null);
         return resultSet;
     }
 
@@ -237,7 +259,7 @@ public class PhoenixUtil {
             while (resultSet.next()) {
                 Column column = new Column();
                 column.setName(resultSet.getString("COLUMN_NAME"));
-                column.setType(DataTypeMapping.valueOf(resultSet.getString("TYPE_NAME")));
+                column.setType(DataTypeMapping.valueOf(resultSet.getString("TYPE_NAME").replace(" ", "_")));
                 column.setLength(resultSet.getInt("COLUMN_SIZE"));
                 columnList.add(column);
             }
@@ -329,21 +351,35 @@ public class PhoenixUtil {
         executeStatement("UPDATE STATISTICS " + tableName, scenario);
     }
 
+    public String getExplainPlan(Query query) throws SQLException {
+    	return getExplainPlan(query, null, null);
+    }
+
     /**
      * Get explain plan for a query
      *
      * @param query
+     * @param ruleApplier
+     * @param scenario
      * @return
      * @throws SQLException
      */
-    public String getExplainPlan(Query query) throws SQLException {
+    public String getExplainPlan(Query query, Scenario scenario, RulesApplier ruleApplier) throws SQLException {
         Connection conn = null;
         ResultSet rs = null;
         PreparedStatement statement = null;
         StringBuilder buf = new StringBuilder();
         try {
             conn = getConnection(query.getTenantId());
-            statement = conn.prepareStatement("EXPLAIN " + query.getStatement());
+            String explainQuery;
+            if (scenario != null && ruleApplier != null) {
+                explainQuery = query.getDynamicStatement(ruleApplier, scenario);
+            } else {
+                explainQuery = query.getStatement();
+            }
+
+            statement = conn.prepareStatement("EXPLAIN " + explainQuery);
             rs = statement.executeQuery();
             while (rs.next()) {
                 buf.append(rs.getString(1).trim().replace(",", "-"));

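getExplainPlan now optionally runs the dynamically substituted statement through EXPLAIN. A hedged standalone sketch of that flow; the connection URL and SQL are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class ExplainDemo {
    public static void main(String[] args) throws Exception {
        String substituted = "SELECT COUNT(*) FROM MY_TABLE WHERE HOST = 'h1'";
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             PreparedStatement stmt = conn.prepareStatement("EXPLAIN " + substituted);
             ResultSet rs = stmt.executeQuery()) {
            StringBuilder plan = new StringBuilder();
            while (rs.next()) {
                // Same normalization as PhoenixUtil: trim and replace commas for CSV output
                plan.append(rs.getString(1).trim().replace(",", "-"));
            }
            System.out.println(plan);
        }
    }
}
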
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
index 24c68dc..7b9313f 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
@@ -28,10 +28,14 @@ import org.apache.phoenix.pherf.result.DataModelResult;
 import org.apache.phoenix.pherf.result.ResultManager;
 import org.apache.phoenix.pherf.result.RunTime;
 import org.apache.phoenix.pherf.result.ThreadTime;
+import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
+import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats;
 import org.apache.phoenix.pherf.configuration.Query;
+import org.apache.phoenix.pherf.configuration.Scenario;
+import org.apache.phoenix.pherf.configuration.WriteParams;
+import org.apache.phoenix.pherf.configuration.XMLConfigParser;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 
 class MultiThreadedRunner implements Runnable {
@@ -45,6 +49,11 @@ class MultiThreadedRunner implements Runnable {
     private long executionDurationInMs;
     private static long lastResultWritten = System.currentTimeMillis() - 1000;
     private final ResultManager resultManager;
+    private final RulesApplier ruleApplier;
+    private final Scenario scenario;
+    private final WorkloadExecutor workloadExecutor;
+    private final XMLConfigParser parser;
+    
 
     /**
      * MultiThreadedRunner
@@ -55,16 +64,21 @@ class MultiThreadedRunner implements Runnable {
      * @param threadTime
      * @param numberOfExecutions
      * @param executionDurationInMs
+     * @param ruleApplier
      */
     MultiThreadedRunner(String threadName, Query query, DataModelResult dataModelResult,
-            ThreadTime threadTime, long numberOfExecutions, long executionDurationInMs, boolean writeRuntimeResults) {
+            ThreadTime threadTime, long numberOfExecutions, long executionDurationInMs, boolean writeRuntimeResults, RulesApplier ruleApplier, Scenario scenario, WorkloadExecutor workloadExecutor, XMLConfigParser parser) {
         this.query = query;
         this.threadName = threadName;
         this.threadTime = threadTime;
         this.dataModelResult = dataModelResult;
         this.numberOfExecutions = numberOfExecutions;
         this.executionDurationInMs = executionDurationInMs;
+        this.ruleApplier = ruleApplier;
+        this.scenario = scenario;
        	this.resultManager = new ResultManager(dataModelResult.getName(), writeRuntimeResults);
+        this.workloadExecutor = workloadExecutor;
+        this.parser = parser;
     }
 
     /**
@@ -81,7 +95,7 @@ class MultiThreadedRunner implements Runnable {
                 synchronized (resultManager) {
                     timedQuery();
                     if ((System.currentTimeMillis() - lastResultWritten) > 1000) {
-                        resultManager.write(dataModelResult);
+                        resultManager.write(dataModelResult, ruleApplier);
                         lastResultWritten = System.currentTimeMillis();
                     }
                 }
@@ -108,7 +122,7 @@ class MultiThreadedRunner implements Runnable {
     private void timedQuery() throws Exception {
         boolean
                 isSelectCountStatement =
-                query.getStatement().toUpperCase().trim().contains("COUNT(*)") ? true : false;
+                query.getStatement().toUpperCase().trim().contains("COUNT(");
 
         Connection conn = null;
         PreparedStatement statement = null;
@@ -119,8 +133,17 @@ class MultiThreadedRunner implements Runnable {
         long resultRowCount = 0;
 
         try {
-            conn = pUtil.getConnection(query.getTenantId());
-            statement = conn.prepareStatement(query.getStatement());
+            conn = pUtil.getConnection(query.getTenantId(), scenario.getPhoenixProperties());
+            conn.setAutoCommit(true);
+            final String statementString = query.getDynamicStatement(ruleApplier, scenario);
+            statement = conn.prepareStatement(statementString);
+            logger.info("Executing: " + statementString);
+            
+            if (scenario.getWriteParams() != null) {
+                Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, scenario, GeneratePhoenixStats.NO);
+                workloadExecutor.add(writes);
+            }
+            
             boolean isQuery = statement.execute();
             if (isQuery) {
                 rs = statement.getResultSet();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
index 7f861f1..8d0ced5 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats;
 import org.apache.phoenix.pherf.configuration.*;
 import org.apache.phoenix.pherf.result.*;
+import org.apache.phoenix.pherf.rules.RulesApplier;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,6 +43,7 @@ public class QueryExecutor implements Workload {
     private final PhoenixUtil util;
     private final WorkloadExecutor workloadExecutor;
     private final boolean writeRuntimeResults;
+    private RulesApplier ruleApplier;
 
     public QueryExecutor(XMLConfigParser parser, PhoenixUtil util,
             WorkloadExecutor workloadExecutor) {
@@ -64,6 +66,7 @@ public class QueryExecutor implements Workload {
         this.util = util;
         this.workloadExecutor = workloadExecutor;
         this.writeRuntimeResults = writeRuntimeResults;
+        this.ruleApplier = new RulesApplier(parser);
     }
 
     @Override
@@ -143,17 +146,6 @@ public class QueryExecutor implements Workload {
                         ScenarioResult scenarioResult = new ScenarioResult(scenario);
                         scenarioResult.setPhoenixProperties(phoenixProperty);
                         dataModelResult.getScenarioResult().add(scenarioResult);
-                        WriteParams writeParams = scenario.getWriteParams();
-
-                        if (writeParams != null) {
-                            int writerThreadCount = writeParams.getWriterThreadCount();
-                            for (int i = 0; i < writerThreadCount; i++) {
-                                logger.debug("Inserting write workload ( " + i + " ) of ( "
-                                        + writerThreadCount + " )");
-                                Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, GeneratePhoenixStats.NO);
-                                workloadExecutor.add(writes);
-                            }
-                        }
 
                         for (QuerySet querySet : scenario.getQuerySet()) {
                             QuerySetResult querySetResult = new QuerySetResult(querySet);
@@ -161,14 +153,14 @@ public class QueryExecutor implements Workload {
 
                             util.executeQuerySetDdls(querySet);
                             if (querySet.getExecutionType() == ExecutionType.SERIAL) {
-                                executeQuerySetSerial(dataModelResult, querySet, querySetResult);
+                                executeQuerySetSerial(dataModelResult, querySet, querySetResult, scenario);
                             } else {
-                                executeQuerySetParallel(dataModelResult, querySet, querySetResult);
+                                executeQuerySetParallel(dataModelResult, querySet, querySetResult, scenario);
                             }
                         }
-                        resultManager.write(dataModelResult);
+                        resultManager.write(dataModelResult, ruleApplier);
                     }
-                    resultManager.write(dataModelResults);
+                    resultManager.write(dataModelResults, ruleApplier);
                     resultManager.flush();
                 } catch (Exception e) {
                     logger.warn("", e);
@@ -183,10 +175,11 @@ public class QueryExecutor implements Workload {
      * @param dataModelResult
      * @param querySet
      * @param querySetResult
+     * @param scenario 
      * @throws InterruptedException
      */
     protected void executeQuerySetSerial(DataModelResult dataModelResult, QuerySet querySet,
-            QuerySetResult querySetResult) throws InterruptedException {
+            QuerySetResult querySetResult, Scenario scenario) throws InterruptedException {
         for (Query query : querySet.getQuery()) {
             QueryResult queryResult = new QueryResult(query);
             querySetResult.getQueryResults().add(queryResult);
@@ -200,7 +193,7 @@ public class QueryExecutor implements Workload {
                     Runnable
                             thread =
                             executeRunner((i + 1) + "," + cr, dataModelResult, queryResult,
-                                    querySetResult);
+                                    querySetResult, scenario);
                     threads.add(workloadExecutor.getPool().submit(thread));
                 }
 
@@ -224,7 +217,7 @@ public class QueryExecutor implements Workload {
      * @throws InterruptedException
      */
     protected void executeQuerySetParallel(DataModelResult dataModelResult, QuerySet querySet,
-            QuerySetResult querySetResult) throws InterruptedException {
+            QuerySetResult querySetResult, Scenario scenario) throws InterruptedException {
         for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) {
             List<Future> threads = new ArrayList<>();
             for (int i = 0; i < cr; i++) {
@@ -235,7 +228,7 @@ public class QueryExecutor implements Workload {
                     Runnable
                             thread =
                             executeRunner((i + 1) + "," + cr, dataModelResult, queryResult,
-                                    querySetResult);
+                                    querySetResult, scenario);
                     threads.add(workloadExecutor.getPool().submit(thread));
                 }
 
@@ -257,10 +250,11 @@ public class QueryExecutor implements Workload {
      * @param dataModelResult
      * @param queryResult
      * @param querySet
+     * @param scenario 
      * @return
      */
     protected Runnable executeRunner(String name, DataModelResult dataModelResult,
-            QueryResult queryResult, QuerySet querySet) {
+            QueryResult queryResult, QuerySet querySet, Scenario scenario) {
         ThreadTime threadTime = new ThreadTime();
         queryResult.getThreadTimes().add(threadTime);
         threadTime.setThreadName(name);
@@ -271,7 +265,7 @@ public class QueryExecutor implements Workload {
             thread =
                     new MultiThreadedRunner(threadTime.getThreadName(), queryResult,
                             dataModelResult, threadTime, querySet.getNumberOfExecutions(),
-                            querySet.getExecutionDurationInMs(), writeRuntimeResults);
+                            querySet.getExecutionDurationInMs(), writeRuntimeResults, ruleApplier, scenario, workloadExecutor, parser);
         } else {
             thread =
                     new MultithreadedDiffer(threadTime.getThreadName(), queryResult, threadTime,

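The net effect of the QueryExecutor changes above: each runner now receives the
active Scenario together with the shared RulesApplier, so rule-generated values
can be substituted into query statements (e.g. the [TENANT_ID] token used in
the scenario XML further down) at execution time. A minimal sketch of that
substitution idea -- class and method names here are illustrative, not Pherf's
actual API:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PlaceholderSubstitutionSketch {
        // Replace a [COLUMN] token in a query with a rule-generated value.
        // In Pherf the equivalent work is done by the RulesApplier/Scenario
        // pair now threaded into MultiThreadedRunner; this is only the shape.
        static String substitute(String sql, String column, String value) {
            Matcher m = Pattern.compile("\\[" + Pattern.quote(column) + "\\]")
                    .matcher(sql);
            return m.replaceAll(Matcher.quoteReplacement(value));
        }

        public static void main(String[] args) {
            String sql = "select count(*) from PHERF.PHERF_PROD_TEST_UNSALTED"
                    + " where TENANT_ID=[TENANT_ID]";
            System.out.println(substitute(sql, "TENANT_ID", "'00Dxx0000001gER'"));
        }
    }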
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index 69d35cc..205b481 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -19,10 +19,12 @@
 package org.apache.phoenix.pherf.workload;
 
 import java.math.BigDecimal;
+import java.sql.Array;
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -33,6 +35,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats;
 import org.apache.phoenix.pherf.configuration.Column;
@@ -108,21 +111,27 @@ public class WriteWorkload implements Workload {
         this.rulesApplier = new RulesApplier(parser);
         this.resultUtil = new ResultUtil();
         this.generateStatistics = generateStatistics;
-
+        int size = Integer.parseInt(properties.getProperty("pherf.default.dataloader.threadpool"));
+        
         // Overwrite defaults properties with those given in the configuration. This indicates the
         // scenario is a R/W mixed workload.
         if (scenario != null) {
             this.scenario = scenario;
             writeParams = scenario.getWriteParams();
-            threadSleepDuration = writeParams.getThreadSleepDuration();
+            if (writeParams != null) {
+                threadSleepDuration = writeParams.getThreadSleepDuration();
+                size = writeParams.getWriterThreadCount();
+            } else {
+                threadSleepDuration = 0;
+            }
+
         } else {
             writeParams = null;
             this.scenario = null;
             threadSleepDuration = 0;
         }
 
-        int size = Integer.parseInt(properties.getProperty("pherf.default.dataloader.threadpool"));
-
         // Should addBatch/executeBatch be used? Default: false
         this.useBatchApi = Boolean.getBoolean(USE_BATCH_API_PROPERTY);
 
@@ -379,17 +388,50 @@ public class WriteWorkload implements Workload {
                     statement.setInt(count, Integer.parseInt(dataValue.getValue()));
                 }
                 break;
+            case UNSIGNED_LONG:
+                if (dataValue.getValue().equals("")) {
+                    statement.setNull(count, Types.BIGINT);
+                } else {
+                    statement.setLong(count, Long.parseLong(dataValue.getValue()));
+                }
+                break;
             case DATE:
                 if (dataValue.getValue().equals("")) {
                     statement.setNull(count, Types.DATE);
                 } else {
                     Date
                             date =
-                            new java.sql.Date(
-                                    simpleDateFormat.parse(dataValue.getValue()).getTime());
+                            new java.sql.Date(simpleDateFormat.parse(dataValue.getValue()).getTime());
                     statement.setDate(count, date);
                 }
                 break;
+            case VARCHAR_ARRAY:
+                if (dataValue.getValue().equals("")) {
+                    statement.setNull(count, Types.ARRAY);
+                } else {
+                    Array
+                            arr =
+                            statement.getConnection().createArrayOf("VARCHAR", dataValue.getValue().split(","));
+                    statement.setArray(count, arr);
+                }
+                break;
+            case VARBINARY:
+                if (dataValue.getValue().equals("")) {
+                    statement.setNull(count, Types.VARBINARY);
+                } else {
+                    statement.setBytes(count, dataValue.getValue().getBytes());
+                }
+                break;
+            case TIMESTAMP:
+                if (dataValue.getValue().equals("")) {
+                    statement.setNull(count, Types.TIMESTAMP);
+                } else {
+                    java.sql.Timestamp
+                            ts =
+                            new java.sql.Timestamp(simpleDateFormat.parse(dataValue.getValue()).getTime());
+                    statement.setTimestamp(count, ts);
+                }
+                break;
             default:
                 break;
             }
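
For readers unfamiliar with the JDBC calls the new cases rely on, this
stand-alone sketch shows the same binding pattern outside Pherf. The URL,
table, and values are hypothetical; only createArrayOf/setArray and
setTimestamp mirror the VARCHAR_ARRAY and TIMESTAMP cases added above:

    import java.sql.Array;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.Timestamp;
    import java.text.SimpleDateFormat;

    public class BindNewTypesSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical Phoenix URL and table; adjust for a real cluster.
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            PreparedStatement stmt = conn.prepareStatement(
                    "UPSERT INTO MY_TABLE (ID, TAGS, TS) VALUES (?, ?, ?)");
            stmt.setInt(1, 1);

            // VARCHAR ARRAY: split the rule value on commas and bind through
            // Connection.createArrayOf, as the VARCHAR_ARRAY case does.
            Array tags = conn.createArrayOf("VARCHAR", "Foo,Bar".split(","));
            stmt.setArray(2, tags);

            // TIMESTAMP: parse with the scenario's date format and bind as
            // java.sql.Timestamp, as the TIMESTAMP case does.
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            stmt.setTimestamp(3,
                    new Timestamp(fmt.parse("2020-01-01 00:00:00").getTime()));

            stmt.executeUpdate();
            conn.commit();
            conn.close();
        }
    }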

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml b/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
index 8f93685..04e02dc 100644
--- a/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
+++ b/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
@@ -340,8 +340,6 @@
 
         </scenario>
         <scenario tableName="PHERF.PHERF_PROD_TEST_UNSALTED" rowCount="10">
-            <!-- Scenario level rule overrides will be unsupported in V1.
-                    You can use the general datamappings in the mean time-->
             <dataOverride>
                 <column>
                     <type>VARCHAR</type>
@@ -361,8 +359,20 @@
                     <name>TENANT_ID</name>
                 </column>
             </dataOverride>
+
+            <!--  Pre and post scenario indexes -->
+            <preScenarioDdls>
+                <ddl>CREATE INDEX IDX_DIVISION ON PHERF.PHERF_PROD_TEST_UNSALTED (DIVISION)</ddl>
+            </preScenarioDdls>
+
+            <postScenarioDdls>
+                <ddl>CREATE INDEX IDX_OLDVAL_STRING ON PHERF.PHERF_PROD_TEST_UNSALTED (OLDVAL_STRING)</ddl>
+                <ddl>CREATE INDEX IDX_CONNECTION_ID ON PHERF.PHERF_PROD_TEST_UNSALTED (CONNECTION_ID)</ddl>
+            </postScenarioDdls>
+
            <!-- Minimum of executionDurationInMs or numberOfExecutions, whichever is reached first -->
             <querySet concurrency="1" executionType="PARALLEL" executionDurationInMs="60000" numberOfExecutions="100">
+                <query statement="select count(*) from PHERF.PHERF_PROD_TEST_UNSALTED WHERE TENANT_ID=[TENANT_ID] AND TENANT_ID=[TENANT_ID]"/>
                 <!--  Aggregate queries on a per tenant basis -->
                 <query tenantId="00Dxx0000001gER"
                        ddl="CREATE VIEW IF NOT EXISTS PHERF.PHERF_TEST_VIEW_UNSALTED AS SELECT * FROM PHERF.PHERF_PROD_TEST_UNSALTED"

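The new preScenarioDdls/postScenarioDdls elements let a scenario build indexes
before its data load starts and after it finishes. Functionally this amounts
to running each <ddl> entry as a plain statement; a minimal sketch with
illustrative names (Pherf's real hook also records timings and errors):

    import java.sql.Connection;
    import java.sql.Statement;
    import java.util.List;

    public class ScenarioDdlSketch {
        // Execute each configured DDL in document order.
        static void runDdls(Connection conn, List<String> ddls) throws Exception {
            for (String ddl : ddls) {
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute(ddl);
                }
            }
        }
    }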
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java
index e573c07..35e8754 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java
@@ -18,6 +18,9 @@
 
 package org.apache.phoenix.pherf;
 
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.phoenix.pherf.configuration.Column;
 import org.apache.phoenix.pherf.configuration.DataTypeMapping;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index bd17192..f9ee874 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -71,8 +71,8 @@ public class ConfigurationParserTest extends ResultBaseTest {
             assertTrue("Could not load the data columns from xml.",
                     (dataMappingColumns != null) && (dataMappingColumns.size() > 0));
             assertTrue("Could not load the data DataValue list from xml.",
-                    (dataMappingColumns.get(6).getDataValues() != null)
-                            && (dataMappingColumns.get(6).getDataValues().size() > 0));
+                    (dataMappingColumns.get(8).getDataValues() != null)
+                            && (dataMappingColumns.get(8).getDataValues().size() > 0));
 
             assertDateValue(dataMappingColumns);
             assertCurrentDateValue(dataMappingColumns);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
index 228cd58..f4b0e5c 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -24,8 +24,10 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Date;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
@@ -245,6 +247,87 @@ public class RuleGeneratorTest {
                 testSet.size() == (threadCount * increments));
     }
 
+    @Test
+    public void testTimestampRule() throws Exception {
+        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+        SimpleDateFormat df = new SimpleDateFormat("yyyy");
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        WriteWorkload loader = new WriteWorkload(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        Scenario scenario = parser.getScenarios().get(0);
+
+        Column simPhxCol = new Column();
+        simPhxCol.setName("TS_DATE");
+        simPhxCol.setType(DataTypeMapping.TIMESTAMP);
+
+        // Running this 10 times gives a reasonable sample; every value must fall in the configured year range
+        for (int i = 0; i < 10; i++) {
+            DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
+            Date dt = simpleDateFormat.parse(value.getValue());
+            int year = Integer.parseInt(df.format(dt));
+            assertTrue("Got unexpected TS value " + value.getValue(), year >= 2020 && year <= 2025);
+        }
+    }
+
+    @Test
+    public void testVarcharArray() throws Exception {
+
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        WriteWorkload loader = new WriteWorkload(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        
+        // Run this 15 times; the VARCHAR_ARRAY rule always returns the full value list joined with commas
+        for (int i = 0; i < 15; i++) {
+            Column c = rulesApplier.getRule("VAR_ARRAY");
+            DataValue value = rulesApplier.getDataValue(c);
+            assertTrue("Got a value not in the list for the rule: " + value.getValue(), value.getValue().equals("Foo,Bar"));
+        }
+    }
+
+    @Test
+    public void testVarBinary() throws Exception {
+        List<String> expectedValues = new ArrayList();
+        for (int i=0; i<10; i++) {
+            expectedValues.add("VBOxx00" + i);
+        }
+
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        WriteWorkload loader = new WriteWorkload(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        
+        for (int i = 0; i < 5; i++) {
+            Column c = rulesApplier.getRule("VAR_BIN");
+            DataValue value = rulesApplier.getDataValue(c);
+            assertTrue("Got a value not in the list for the rule: " + value.getValue(), expectedValues.contains(value.getValue()));
+        }
+    }
+
+    @Test
+    public void testPrefixSequence() throws Exception {
+        List<String> expectedValues = new ArrayList();
+        expectedValues.add("0F90000000000X0");
+        expectedValues.add("0F90000000000X1");
+        expectedValues.add("0F90000000000X2");
+        expectedValues.add("0F90000000000X3");
+        expectedValues.add("0F90000000000X4");
+        expectedValues.add("0F90000000000X5");
+        expectedValues.add("0F90000000000X6");
+        expectedValues.add("0F90000000000X7");
+        expectedValues.add("0F90000000000X8");
+        expectedValues.add("0F90000000000X9");
+
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        WriteWorkload loader = new WriteWorkload(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        
+        // Running this 15 times gives a reasonable chance that all the values will appear at least once
+        for (int i = 0; i < 15; i++) {
+            DataValue value = rulesApplier.getDataValue(rulesApplier.getRule("NEWVAL_STRING"));
+            assertTrue("Got a value not in the list for the rule: " + value.getValue(), expectedValues.contains(value.getValue()));
+        }
+    }
+
     @Test
     public void testValueListRule() throws Exception {
         List<String> expectedValues = new ArrayList();
@@ -301,12 +384,30 @@ public class RuleGeneratorTest {
         assertEquals("Did not find the matching rule type.", rule.getType(), simPhxCol.getType());
         assertEquals("Rule contains incorrect length.", rule.getLength(), 10);
         assertEquals("Rule contains incorrect prefix.", rule.getPrefix(), "MYPRFX");
-
+        
         value = rulesApplier.getDataForRule(scenario, simPhxCol);
-        assertEquals("Value returned does not match rule.", value.getValue().length(), 10);
-        assertTrue("Value returned start with prefix.",
+        assertEquals("Value returned does not match rule.", 10, value.getValue().length());
+        assertTrue("Value returned should start with prefix: " + value.getValue(),
                 StringUtils.startsWith(value.getValue(), rule.getPrefix()));
+        
+    }
+    
+    
+    @Test
+    public void testScenarioLevelRuleOverride() throws Exception {
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        WriteWorkload loader = new WriteWorkload(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        Scenario scenario = parser.getScenarios().get(0);
+        
+        // Test scenario level overridden rule
+        Column simPhxCol = new Column();
+        simPhxCol.setName("FIELD");
+        simPhxCol.setType(DataTypeMapping.VARCHAR);
+        DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
+        assertEquals("Override rule should contain field length of 5", 5, value.getValue().length());
     }
+    
 
     /**
      * Asserts that the value field is between the min/max value fields

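testTimestampRule above only asserts that generated timestamps land inside the
configured year window (the GENERAL_TIMESTAMP column further down sets
minValue 2020 and maxValue 2025). A hedged sketch of one way such a generator
can work; RulesApplier's actual interpolation may differ:

    import java.sql.Timestamp;
    import java.util.Calendar;
    import java.util.concurrent.ThreadLocalRandom;

    public class RandomTimestampSketch {
        // Pick a year uniformly in [minYear, maxYear], then a random offset
        // inside that year, so the formatted year always satisfies the test.
        static Timestamp randomTimestamp(int minYear, int maxYear) {
            Calendar cal = Calendar.getInstance();
            cal.clear();
            cal.set(ThreadLocalRandom.current().nextInt(minYear, maxYear + 1),
                    Calendar.JANUARY, 1);
            long start = cal.getTimeInMillis();
            long offset = ThreadLocalRandom.current()
                    .nextLong(365L * 24 * 3600 * 1000);
            return new Timestamp(start + offset);
        }

        public static void main(String[] args) {
            System.out.println(randomTimestamp(2020, 2025));
        }
    }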
http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
index 21034d9..fa9952b 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
@@ -20,9 +20,13 @@ CREATE TABLE IF NOT EXISTS PHERF.TEST_TABLE (
     PARENT_ID CHAR(15) NOT NULL,
     CREATED_DATE DATE NOT NULL,
     NOW_DATE DATE,
+    TS_DATE TIMESTAMP,
     PRESENT_DATE DATE,
     OTHER_ID CHAR(15),
     FIELD VARCHAR,
+    VAR_ARRAY VARCHAR ARRAY,
+    VAR_BIN VARBINARY,
+    DIVISION INTEGER,
     OLDVAL_STRING VARCHAR,
     NEWVAL_STRING VARCHAR,
     SOME_INT INTEGER

http://git-wip-us.apache.org/repos/asf/phoenix/blob/04029fb2/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
index 34bf31a..895a7d8 100644
--- a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
+++ b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
@@ -33,6 +33,18 @@
             <name>GENERAL_CHAR</name>
         </column>
         <column>
+            <type>TIMESTAMP</type>
+            <!--SEQUENTIAL is unsupported for TIMESTAMP -->
+            <dataSequence>RANDOM</dataSequence>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <minValue>2020</minValue>
+            <maxValue>2025</maxValue>
+            <name>GENERAL_TIMESTAMP</name>
+        </column>
+        <column>
             <type>DATE</type>
             <!--SEQUENTIAL is unsupported for DATE -->
             <dataSequence>RANDOM</dataSequence>
@@ -150,9 +162,22 @@
             <type>VARCHAR</type>
             <length>15</length>
             <userDefined>true</userDefined>
-            <dataSequence>RANDOM</dataSequence>
+            <dataSequence>SEQUENTIAL</dataSequence>
             <name>NEWVAL_STRING</name>
-            <prefix>TSTPRFX</prefix>
+            <prefix>0F90000000000X</prefix>
+        </column>
+        <column>
+            <type>VARCHAR_ARRAY</type>
+            <userDefined>true</userDefined>
+            <name>VAR_ARRAY</name>
+            <valuelist>
+                <datavalue>
+                    <value>Foo</value>
+                </datavalue>
+                <datavalue>
+                    <value>Bar</value>
+                </datavalue>
+            </valuelist>
         </column>
         <column>
             <type>CHAR</type>
@@ -181,6 +206,21 @@
             <name>OTHER_ID</name>
             <prefix>z0Oxx00</prefix>
         </column>
+        <column>
+            <type>VARBINARY</type>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <length>8</length>
+            <name>VAR_BIN</name>
+            <prefix>VBOxx00</prefix>
+        </column>
+        <column>
+            <type>VARCHAR</type>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <length>1</length>
+            <name>FIELD</name>
+        </column>
     </datamapping>
     <scenarios>
         <scenario tableName="PHERF.TEST_TABLE" rowCount="100" name="testScenarioRW">
@@ -191,7 +231,7 @@
                     <type>VARCHAR</type>
                     <userDefined>true</userDefined>
                     <dataSequence>RANDOM</dataSequence>
-                    <length>10</length>
+                    <length>5</length>
                     <name>FIELD</name>
                 </column>
             </dataOverride>
@@ -219,7 +259,7 @@
             </writeParams>
             <querySet concurrency="1" executionType="PARALLEL" executionDurationInMs="10000">
                 <query id="q3" statement="select count(*) from PHERF.TEST_TABLE"/>
-                <query id="q4" statement="select sum(DIVISION) from PHERF.TEST_TABLE"/>
+                <query id="q4" statement="select sum(SOME_INT) from PHERF.TEST_TABLE"/>
             </querySet>
 
         </scenario>

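The NEWVAL_STRING column above switches to a SEQUENTIAL sequence with prefix
0F90000000000X, which is what testPrefixSequence exercises: the prefix plus an
increasing counter. A sketch of the idea; the ten-value wrap-around is
inferred from the test's expected list, not from RulesApplier itself:

    import java.util.concurrent.atomic.AtomicLong;

    public class SequentialPrefixSketch {
        // With <length>15</length> and a 14-character prefix there is one
        // digit of headroom, so values cycle 0F90000000000X0 through X9.
        private final AtomicLong counter = new AtomicLong();

        String next(String prefix, int length) {
            long headroom = (long) Math.pow(10, length - prefix.length());
            return prefix + (counter.getAndIncrement() % headroom);
        }

        public static void main(String[] args) {
            SequentialPrefixSketch gen = new SequentialPrefixSketch();
            for (int i = 0; i < 3; i++) {
                System.out.println(gen.next("0F90000000000X", 15));
            }
        }
    }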

[7/7] phoenix git commit: PHOENIX-4571 Adds dependency on servlet-api for PQS

Posted by pb...@apache.org.
PHOENIX-4571 Adds dependency on servlet-api for PQS


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/06ecae7a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/06ecae7a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/06ecae7a

Branch: refs/heads/4.x-cdh5.11.2
Commit: 06ecae7a073560ea97e3f8818c29739c73166ad8
Parents: 04029fb
Author: Josh Elser <el...@apache.org>
Authored: Tue Jan 30 21:06:08 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Sun Feb 11 15:59:22 2018 +0000

----------------------------------------------------------------------
 phoenix-queryserver/pom.xml | 4 ++++
 pom.xml                     | 7 ++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/06ecae7a/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index a0866a3..7180b18 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -147,6 +147,10 @@
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
     </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+    </dependency>
     <!-- for tests -->
     <dependency>
       <groupId>org.mockito</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/06ecae7a/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 513460e..574f8ba 100644
--- a/pom.xml
+++ b/pom.xml
@@ -149,7 +149,7 @@
     <scala.binary.version>2.10</scala.binary.version>
     <stream.version>2.9.5</stream.version>
     <i18n-util.version>1.0.1</i18n-util.version>
-
+    <servlet.api.version>3.1.0</servlet.api.version>
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>
     <junit.version>4.12</junit.version>
@@ -979,6 +979,11 @@
         <artifactId>i18n-util</artifactId>
         <version>${i18n-util.version}</version>
       </dependency>
+      <dependency>
+        <groupId>javax.servlet</groupId>
+        <artifactId>javax.servlet-api</artifactId>
+        <version>${servlet.api.version}</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>