Posted to commits@phoenix.apache.org by ap...@apache.org on 2015/05/20 19:58:11 UTC

[01/31] phoenix git commit: Changed version to 4.4.0-HBase-1.x-SNAPSHOT

Repository: phoenix
Updated Branches:
  refs/heads/4.3 52d183356 -> 33cb45d0e
  refs/heads/4.x-HBase-0.98 1b943dbf2 -> ff5d8b930
  refs/heads/4.x-HBase-1.0 deb478652 -> e9623da17
  refs/heads/4.x-HBase-1.x [created] 166425dba
  refs/heads/5.x-HBase-1.1 [created] bd974e7b7
  refs/heads/master c83ab9edb -> a4b4e0e2d


Changed version to 4.4.0-HBase-1.x-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/03fce013
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/03fce013
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/03fce013

Branch: refs/heads/4.x-HBase-1.x
Commit: 03fce013c3a0c4883d3d1e9ad037d81c471ef74f
Parents: 174d0e6
Author: Enis Soztutar <en...@apache.org>
Authored: Thu Mar 19 13:34:46 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu Mar 19 13:34:46 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml | 2 +-
 phoenix-core/pom.xml     | 2 +-
 phoenix-flume/pom.xml    | 2 +-
 phoenix-pherf/pom.xml    | 2 +-
 phoenix-pig/pom.xml      | 2 +-
 pom.xml                  | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index a887dbf..96bb16f 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+    <version>4.4.0-HBase-1.x-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 31b7afd..a325b27 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+    <version>4.4.0-HBase-1.x-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 6e29227..af01f6b 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+    <version>4.4.0-HBase-1.x-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 26698b2..a0d521c 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <groupId>org.apache.phoenix</groupId>
         <artifactId>phoenix</artifactId>
-        <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+        <version>4.4.0-HBase-1.x-SNAPSHOT</version>
     </parent>
 
     <artifactId>pherf</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 7a2072c..0709657 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+    <version>4.4.0-HBase-1.x-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f8f268d..92b0ed6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.phoenix</groupId>
   <artifactId>phoenix</artifactId>
-  <version>4.4.0-HBase-0.98-SNAPSHOT</version>
+  <version>4.4.0-HBase-1.x-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Phoenix</name>
   <description>A SQL layer over HBase</description>


[23/31] phoenix git commit: PHOENIX-1763 Support building with HBase-1.1.0 (Enis Soztutar)

Posted by ap...@apache.org.
PHOENIX-1763 Support building with HBase-1.1.0 (Enis Soztutar)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/41ad9188
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/41ad9188
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/41ad9188

Branch: refs/heads/5.x-HBase-1.1
Commit: 41ad9188a22a9e258b2d748f60d73c61726528fd
Parents: d147423
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Apr 15 11:26:39 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Apr 15 11:26:39 2015 -0700

----------------------------------------------------------------------
 phoenix-core/pom.xml                            | 17 +++--
 .../regionserver/IndexHalfStoreFileReader.java  | 31 ++++++--
 .../regionserver/IndexSplitTransaction.java     | 39 ++++++++--
 .../hbase/regionserver/LocalIndexMerger.java    |  3 +-
 .../cache/aggcache/SpillableGroupByCache.java   | 13 +++-
 .../phoenix/coprocessor/BaseRegionScanner.java  | 12 +--
 .../coprocessor/BaseScannerRegionObserver.java  | 77 +++++++++++---------
 .../coprocessor/DelegateRegionScanner.java      | 23 ++++--
 .../GroupedAggregateRegionObserver.java         | 53 ++++++++------
 .../coprocessor/HashJoinRegionScanner.java      | 60 ++++++++-------
 .../coprocessor/MetaDataRegionObserver.java     | 23 +++---
 .../phoenix/coprocessor/ScanRegionObserver.java | 11 ++-
 .../UngroupedAggregateRegionObserver.java       | 55 +++++++-------
 .../hbase/index/covered/data/LocalTable.java    |  2 +-
 .../index/covered/filter/FamilyOnlyFilter.java  |  6 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  2 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |  6 +-
 .../iterate/RegionScannerResultIterator.java    |  9 ++-
 .../phoenix/schema/stats/StatisticsScanner.java | 10 ++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  6 +-
 .../index/covered/TestLocalTableState.java      |  1 -
 .../covered/filter/TestFamilyOnlyFilter.java    | 12 +--
 .../index/write/TestWALRecoveryCaching.java     |  4 +-
 phoenix-flume/pom.xml                           |  9 ---
 phoenix-pig/pom.xml                             | 31 +++++---
 pom.xml                                         | 48 +++++++++++-
 26 files changed, 361 insertions(+), 202 deletions(-)
----------------------------------------------------------------------
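For context: the bulk of this patch tracks a scanner API change in HBase 1.1, where the "int limit" overloads of RegionScanner.next() and nextRaw() were replaced by overloads taking a ScannerContext, and RegionScanner gained a getBatch() method. A minimal illustrative sketch of the delegation pattern the commit applies throughout (not code from the patch itself):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;
    import org.apache.hadoop.hbase.regionserver.ScannerContext;

    // Pre-1.1 signatures:
    //     boolean next(List<Cell> result, int limit)
    //     boolean nextRaw(List<Cell> result, int limit)
    // HBase 1.1 signatures:
    //     boolean next(List<Cell> result, ScannerContext scannerContext)
    //     boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
    // plus the new getBatch(). A delegating wrapper forwards all three.
    abstract class DelegatingScannerSketch implements RegionScanner {
        private final RegionScanner delegate;

        DelegatingScannerSketch(RegionScanner delegate) {
            this.delegate = delegate;
        }

        @Override
        public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
            return delegate.next(result, scannerContext);
        }

        @Override
        public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
            return delegate.nextRaw(result, scannerContext);
        }

        @Override
        public int getBatch() { // new in the HBase 1.1 scanner interface
            return delegate.getBatch();
        }
    }

BaseRegionScanner, DelegateRegionScanner, HashJoinRegionScanner and the anonymous scanners in the coprocessors below all follow this shape.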


http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 45b8d73..22e6b60 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -350,16 +350,25 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
     </dependency>
     <dependency>
@@ -369,18 +378,16 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
@@ -391,13 +398,11 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 49e2022..9befc8c 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -47,11 +47,11 @@ import org.apache.phoenix.index.IndexMaintainer;
  * that sort lowest and 'top' is the second half of the file with keys that sort greater than those
  * of the bottom half. The top includes the split files midkey, of the key that follows if it does
  * not exist in the file.
- * 
+ *
  * <p>
  * This type works in tandem with the {@link Reference} type. This class is used reading while
  * Reference is used writing.
- * 
+ *
  * <p>
  * This file is not splitable. Calls to {@link #midkey()} return null.
  */
@@ -64,7 +64,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     private final byte[] splitkey;
     private final byte[] splitRow;
     private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
-    private final byte[][] viewConstants; 
+    private final byte[][] viewConstants;
     private final int offset;
     private final HRegionInfo regionInfo;
     private final byte[] regionStartKeyInHFile;
@@ -144,6 +144,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             final HFileScanner delegate = s;
             public boolean atEnd = false;
 
+            @Override
             public ByteBuffer getKey() {
                 if (atEnd) {
                     return null;
@@ -160,7 +161,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 // If it is top store file replace the StartKey of the Key with SplitKey
                 return getChangedKey(delegate.getKeyValue(), changeBottomKeys);
             }
-            
+
             private ByteBuffer getChangedKey(Cell kv, boolean changeBottomKeys) {
                 // new KeyValue(row, family, qualifier, timestamp, type, value)
                 byte[] newRowkey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(kv, changeBottomKeys);
@@ -183,6 +184,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return keyReplacedStartKey;
             }
 
+            @Override
             public String getKeyString() {
                 if (atEnd) {
                     return null;
@@ -190,6 +192,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getKey());
             }
 
+            @Override
             public ByteBuffer getValue() {
                 if (atEnd) {
                     return null;
@@ -197,6 +200,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.getValue();
             }
 
+            @Override
             public String getValueString() {
                 if (atEnd) {
                     return null;
@@ -204,6 +208,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getValue());
             }
 
+            @Override
             public Cell getKeyValue() {
                 if (atEnd) {
                     return null;
@@ -227,6 +232,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return changedKv;
             }
 
+            @Override
             public boolean next() throws IOException {
                 if (atEnd) {
                     return false;
@@ -248,10 +254,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 }
             }
 
+            @Override
             public boolean seekBefore(byte[] key) throws IOException {
                 return seekBefore(key, 0, key.length);
             }
 
+            @Override
             public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
 
                 if (top) {
@@ -282,6 +290,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return seekBefore(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public boolean seekTo() throws IOException {
                 boolean b = delegate.seekTo();
                 if (!b) {
@@ -302,10 +311,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 }
             }
 
+            @Override
             public int seekTo(byte[] key) throws IOException {
                 return seekTo(key, 0, key.length);
             }
 
+            @Override
             public int seekTo(byte[] key, int offset, int length) throws IOException {
                 if (top) {
                     if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
@@ -342,10 +353,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return seekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public int reseekTo(byte[] key) throws IOException {
                 return reseekTo(key, 0, key.length);
             }
 
+            @Override
             public int reseekTo(byte[] key, int offset, int length) throws IOException {
                 if (top) {
                     if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
@@ -375,11 +388,13 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return reseekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
                 return this.delegate.getReader();
             }
 
             // TODO: Need to change as per IndexHalfStoreFileReader
+            @Override
             public boolean isSeeked() {
                 return this.delegate.isSeeked();
             }
@@ -425,13 +440,13 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     /**
      * In case of top half store, the passed key will be with the start key of the daughter region.
      * But in the actual HFiles, the key will be with the start key of the old parent region. In
-     * order to make the real seek in the HFiles, we need to build the old key. 
-     * 
+     * order to make the real seek in the HFiles, we need to build the old key.
+     *
      * The logic here is just replace daughter region start key with parent region start key
      * in the key part.
-     * 
+     *
      * @param key
-     * 
+     *
      */
     private KeyValue getKeyPresentInHFiles(byte[] key) {
         KeyValue keyValue = new KeyValue(key);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 920380b..3057a14 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -165,6 +165,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return <code>true</code> if the region is splittable else
    * <code>false</code> if it is not (e.g. its already closed, etc.).
    */
+  @Override
   public boolean prepare() {
     if (!this.parent.isSplittable()) return false;
     // Split key can be null if this region is unsplittable; i.e. has refs.
@@ -215,6 +216,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
+  @Override
   /* package */PairOfSameType<HRegion> createDaughters(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting split of region " + this.parent);
@@ -288,16 +290,19 @@ public class IndexSplitTransaction extends SplitTransaction {
       if (metaEntries == null || metaEntries.isEmpty()) {
         MetaTableAccessor.splitRegion(server.getConnection(), parent.getRegionInfo(),
                 daughterRegions.getFirst().getRegionInfo(),
-                daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+                daughterRegions.getSecond().getRegionInfo(), server.getServerName(),
+                parent.getTableDesc().getRegionReplication());
       } else {
         offlineParentInMetaAndputMetaEntries(server.getConnection(),
           parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
-              .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
+              .getSecond().getRegionInfo(), server.getServerName(), metaEntries,
+              parent.getTableDesc().getRegionReplication());
       }
     }
     return daughterRegions;
   }
 
+  @Override
   public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
     // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
@@ -380,6 +385,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException If thrown, transaction failed.
    *          Call {@link #rollback(Server, RegionServerServices)}
    */
+  @Override
   /* package */void openDaughters(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
       throws IOException {
@@ -565,6 +571,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException
    * @see #rollback(Server, RegionServerServices)
    */
+  @Override
   public PairOfSameType<HRegion> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
@@ -575,6 +582,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return stepsAfterPONR(server, services, regions);
   }
 
+  @Override
   public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
       final RegionServerServices services, PairOfSameType<HRegion> regions)
       throws IOException {
@@ -585,7 +593,7 @@ public class IndexSplitTransaction extends SplitTransaction {
 
   private void offlineParentInMetaAndputMetaEntries(Connection conn,
       HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
-      ServerName serverName, List<Mutation> metaEntries) throws IOException {
+      ServerName serverName, List<Mutation> metaEntries, int regionReplication) throws IOException {
     List<Mutation> mutations = metaEntries;
     HRegionInfo copyOfParent = new HRegionInfo(parent);
     copyOfParent.setOffline(true);
@@ -595,7 +603,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
     MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
     mutations.add(putParent);
-    
+
     //Puts for daughters
     Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
     Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
@@ -604,9 +612,18 @@ public class IndexSplitTransaction extends SplitTransaction {
     addLocation(putB, serverName, 1);
     mutations.add(putA);
     mutations.add(putB);
+
+    // Add empty locations for region replicas of daughters so that number of replicas can be
+    // cached whenever the primary region is looked up from meta
+    for (int i = 1; i < regionReplication; i++) {
+      addEmptyLocation(putA, i);
+      addEmptyLocation(putB, i);
+    }
+
     MetaTableAccessor.mutateMetaTable(conn, mutations);
   }
 
+  @Override
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
       Bytes.toBytes(sn.getHostAndPort()));
@@ -617,6 +634,13 @@ public class IndexSplitTransaction extends SplitTransaction {
     return p;
   }
 
+  private static Put addEmptyLocation(final Put p, int replicaId){
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId), null);
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
+    return p;
+  }
+
   /*
    * Open daughter region in its own thread.
    * If we fail, abort this hosting server.
@@ -659,6 +683,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException
    * @throws KeeperException
    */
+  @Override
   void openDaughterRegion(final Server server, final HRegion daughter)
   throws IOException, KeeperException {
     HRegionInfo hri = daughter.getRegionInfo();
@@ -767,6 +792,7 @@ public class IndexSplitTransaction extends SplitTransaction {
       this.family = family;
     }
 
+    @Override
     public Void call() throws IOException {
       splitStoreFile(family, sf);
       return null;
@@ -807,6 +833,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return True if we successfully rolled back, false if we got to the point
    * of no return and so now need to abort the server to minimize damage.
    */
+  @Override
   @SuppressWarnings("deprecation")
   public boolean rollback(final Server server, final RegionServerServices services)
   throws IOException {
@@ -879,10 +906,12 @@ public class IndexSplitTransaction extends SplitTransaction {
     return result;
   }
 
+  @Override
   HRegionInfo getFirstDaughter() {
     return hri_a;
   }
 
+  @Override
   HRegionInfo getSecondDaughter() {
     return hri_b;
   }
@@ -971,7 +1000,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return ZKAssign.transitionNode(zkw, parent, serverName,
       beginState, endState, znodeVersion, payload);
   }
-  
+
   public HRegion getParent() {
     return this.parent;
   }
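The addEmptyLocation() helper and the replica loop added above ensure that each daughter's meta row advertises the table's replica count, so clients that look up the primary region in meta can cache the number of replicas. A hedged standalone rendering of the same idea (class and method names here are illustrative, and REGION_REPLICATION = 3 is an assumed example value):

    import java.io.IOException;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Put;

    final class ReplicaMetaSketch {
        // Same shape as the private helper in the patch: null-valued
        // server/startcode/seqnum columns for the given replica id.
        static Put addEmptyLocation(Put p, int replicaId) {
            p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
            p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId), null);
            p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
            return p;
        }

        // For a table created with REGION_REPLICATION = 3, the loop runs for
        // replicaId 1 and 2, matching createDaughters() above.
        static Put daughterPut(HRegionInfo daughter, int regionReplication) throws IOException {
            Put put = MetaTableAccessor.makePutFromRegionInfo(daughter);
            for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
                addEmptyLocation(put, replicaId);
            }
            return put;
        }
    }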

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index f074df7..add9b72 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -81,7 +81,8 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                 this.mergedRegion = rmt.stepsBeforePONR(rss, rss, false);
                 rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(),
                     indexRegionA.getRegionInfo(), indexRegionB.getRegionInfo(),
-                    rss.getServerName(), metaEntries);
+                    rss.getServerName(), metaEntries,
+                    mergedRegion.getTableDesc().getRegionReplication());
             } catch (Exception e) {
                 ctx.bypass();
                 LOG.warn("index regions merge failed with the exception ", e);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index ce18cc2..69fc6f6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -120,7 +120,7 @@ public class SpillableGroupByCache implements GroupByCache {
 
     /**
      * Instantiates a Loading LRU Cache that stores key / aggregator[] tuples used for group by queries
-     * 
+     *
      * @param estSize
      * @param estValueSize
      * @param aggs
@@ -325,7 +325,7 @@ public class SpillableGroupByCache implements GroupByCache {
 
     /**
      * Closes cache and releases spill resources
-     * 
+     *
      * @throws IOException
      */
     @Override
@@ -358,7 +358,9 @@ public class SpillableGroupByCache implements GroupByCache {
 
             @Override
             public boolean next(List<Cell> results) throws IOException {
-                if (!cacheIter.hasNext()) { return false; }
+                if (!cacheIter.hasNext()) {
+                    return false;
+                }
                 Map.Entry<ImmutableBytesWritable, Aggregator[]> ce = cacheIter.next();
                 ImmutableBytesWritable key = ce.getKey();
                 Aggregator[] aggs = ce.getValue();
@@ -377,6 +379,11 @@ public class SpillableGroupByCache implements GroupByCache {
             public long getMaxResultSize() {
               return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return s.getBatch();
+            }
         };
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
index ff9ac76..828f776 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
@@ -22,14 +22,14 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 
 public abstract class BaseRegionScanner implements RegionScanner {
 
     @Override
     public boolean isFilterDone() {
-        return false; 
+        return false;
     }
 
     @Override
@@ -38,10 +38,10 @@ public abstract class BaseRegionScanner implements RegionScanner {
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
         return next(result);
     }
-    
+
     @Override
     public boolean reseek(byte[] row) throws IOException {
         throw new DoNotRetryIOException("Unsupported");
@@ -58,7 +58,7 @@ public abstract class BaseRegionScanner implements RegionScanner {
     }
 
     @Override
-    public boolean nextRaw(List<Cell> result, int limit) throws IOException {
-        return next(result, limit);
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return next(result, scannerContext);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index a2269b4..fc74968 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -60,7 +61,7 @@ import com.google.common.collect.ImmutableList;
 
 
 abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
-    
+
     public static final String AGGREGATORS = "_Aggs";
     public static final String UNORDERED_GROUP_BY_EXPRESSIONS = "_UnorderedGroupByExpressions";
     public static final String KEY_ORDERED_GROUP_BY_EXPRESSIONS = "_OrderedGroupByExpressions";
@@ -91,7 +92,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
      * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations
      * are used to augment log lines emitted by Phoenix. See https://issues.apache.org/jira/browse/PHOENIX-1198.
      */
-    public static final String CUSTOM_ANNOTATIONS = "_Annot"; 
+    public static final String CUSTOM_ANNOTATIONS = "_Annot";
 
     /** Exposed for testing */
     public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on server";
@@ -111,8 +112,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     public String toString() {
         return this.getClass().getName();
     }
-    
-    
+
+
     private static void throwIfScanOutOfRegion(Scan scan, HRegion region) throws DoNotRetryIOException {
         boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
         byte[] lowerInclusiveScanKey = scan.getStartRow();
@@ -136,7 +137,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
 
     abstract protected boolean isRegionObserverFor(Scan scan);
     abstract protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws Throwable;
-    
+
     @Override
     public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
         final Scan scan, final RegionScanner s) throws IOException {
@@ -153,7 +154,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     /**
      * Wrapper for {@link #postScannerOpen(ObserverContext, Scan, RegionScanner)} that ensures no non IOException is thrown,
      * to prevent the coprocessor from becoming blacklisted.
-     * 
+     *
      */
     @Override
     public final RegionScanner postScannerOpen(
@@ -165,10 +166,10 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
             boolean success =false;
             // Save the current span. When done with the child span, reset the span back to
-            // what it was. Otherwise, this causes the thread local storing the current span 
+            // what it was. Otherwise, this causes the thread local storing the current span
             // to not be reset back to null causing catastrophic infinite loops
             // and region servers to crash. See https://issues.apache.org/jira/browse/PHOENIX-1596
-            // TraceScope can't be used here because closing the scope will end up calling 
+            // TraceScope can't be used here because closing the scope will end up calling
             // currentSpan.stop() and that should happen only when we are closing the scanner.
             final Span savedSpan = Trace.currentSpan();
             final Span child = Trace.startSpan(SCANNER_OPENED_TRACE_INFO, savedSpan).getSpan();
@@ -226,7 +227,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
                 dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr);
     }
-    
+
     /**
      * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and
      * re-throws as DoNotRetryIOException to prevent needless retrying hanging the query
@@ -246,7 +247,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final Expression[] arrayFuncRefs, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
             final HRegion dataRegion, final IndexMaintainer indexMaintainer,
-            final byte[][] viewConstants, final KeyValueSchema kvSchema, 
+            final byte[][] viewConstants, final KeyValueSchema kvSchema,
             final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
         return new RegionScanner() {
@@ -262,9 +263,9 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
 
             @Override
-            public boolean next(List<Cell> result, int limit) throws IOException {
+            public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
                 try {
-                    return s.next(result, limit);
+                    return s.next(result, scannerContext);
                 } catch (Throwable t) {
                     ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
                     return false; // impossible
@@ -324,30 +325,31 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
 
             @Override
-            public boolean nextRaw(List<Cell> result, int limit) throws IOException {
-                try {
-                    boolean next = s.nextRaw(result, limit);
-                    if (result.size() == 0) {
-                        return next;
-                    }
-                    if (arrayFuncRefs != null && arrayFuncRefs.length > 0 && arrayKVRefs.size() > 0) {
-                        replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
-                    }
-                    if ((offset > 0 || ScanUtil.isLocalIndex(scan))  && !ScanUtil.isAnalyzeTable(scan)) {
-                        IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
-                            tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
-                    }
-                    if (projector != null) {
-                        Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
-                        result.clear();
-                        result.add(tuple.getValue(0));
-                    }
-                    // There is a scanattribute set to retrieve the specific array element
+            public boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
+                throws IOException {
+              try {
+                boolean next = s.nextRaw(result, scannerContext);
+                if (result.size() == 0) {
                     return next;
-                } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
-                    return false; // impossible
                 }
+                if (arrayFuncRefs != null && arrayFuncRefs.length > 0 && arrayKVRefs.size() > 0) {
+                    replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
+                }
+                if ((offset > 0 || ScanUtil.isLocalIndex(scan))  && !ScanUtil.isAnalyzeTable(scan)) {
+                    IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
+                        tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
+                }
+                if (projector != null) {
+                    Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+                    result.clear();
+                    result.add(tuple.getValue(0));
+                }
+                // There is a scanattribute set to retrieve the specific array element
+                return next;
+            } catch (Throwable t) {
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                return false; // impossible
+            }
             }
 
             private void replaceArrayIndexElement(final Set<KeyValueColumnExpression> arrayKVRefs,
@@ -387,6 +389,11 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             public long getMaxResultSize() {
                 return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return s.getBatch();
+            }
         };
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index f88a931..43c35a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 
 public class DelegateRegionScanner implements RegionScanner {
 
@@ -56,23 +57,33 @@ public class DelegateRegionScanner implements RegionScanner {
         delegate.close();
     }
 
+    @Override
     public long getMaxResultSize() {
         return delegate.getMaxResultSize();
     }
 
-    public boolean next(List<Cell> arg0, int arg1) throws IOException {
-        return delegate.next(arg0, arg1);
+    @Override
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return delegate.next(result, scannerContext);
     }
 
-    public boolean next(List<Cell> arg0) throws IOException {
-        return delegate.next(arg0);
+    @Override
+    public boolean next(List<Cell> result) throws IOException {
+        return delegate.next(result);
     }
 
-    public boolean nextRaw(List<Cell> arg0, int arg1) throws IOException {
-        return delegate.nextRaw(arg0, arg1);
+    @Override
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return delegate.nextRaw(result, scannerContext);
     }
 
+    @Override
     public boolean nextRaw(List<Cell> arg0) throws IOException {
         return delegate.nextRaw(arg0);
     }
+
+    @Override
+    public int getBatch() {
+        return delegate.getBatch();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 1f1ba36..19a1663 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -80,7 +80,7 @@ import com.google.common.collect.Maps;
 
 /**
  * Region observer that aggregates grouped rows (i.e. SQL query with GROUP BY clause)
- * 
+ *
  * @since 0.1
  */
 public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
@@ -116,7 +116,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
-        
+
         List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
         ServerAggregators aggregators =
                 ServerAggregators.deserialize(scan
@@ -124,7 +124,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                         .getEnvironment().getConfiguration());
 
         RegionScanner innerScanner = s;
-        
+
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         TupleProjector tupleProjector = null;
@@ -142,9 +142,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             }
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             innerScanner =
-                    getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
+                    getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector,
                             dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-        } 
+        }
 
         if (j != null) {
             innerScanner =
@@ -223,13 +223,13 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
     }
 
     /**
-     * 
+     *
      * Cache for distinct values and their aggregations which is completely
      * in-memory (as opposed to spilling to disk). Used when GROUPBY_SPILLABLE_ATTRIB
      * is set to false. The memory usage is tracked at a coursed grain and will
      * throw and abort if too much is used.
      *
-     * 
+     *
      * @since 3.0.0
      */
     private static final class InMemoryGroupByCache implements GroupByCache {
@@ -238,9 +238,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         private final ServerAggregators aggregators;
         private final RegionCoprocessorEnvironment env;
         private final byte[] customAnnotations;
-        
+
         private int estDistVals;
-        
+
         InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             int estValueSize = aggregators.getEstimatedByteSize();
             long estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize);
@@ -252,7 +252,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             this.chunk = tenantCache.getMemoryManager().allocate(estSize);
             this.customAnnotations = customAnnotations;
         }
-        
+
         @Override
         public void close() throws IOException {
             this.chunk.close();
@@ -291,7 +291,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             chunk.resize(estSize);
 
             final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
-            
+
             final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
                     aggregateMap.entrySet().iterator();
             while (cacheIter.hasNext()) {
@@ -333,7 +333,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
 
                 @Override
                 public boolean next(List<Cell> results) throws IOException {
-                    if (index >= aggResults.size()) return false;
+                    if (index >= aggResults.size()) {
+                        return false;
+                    }
                     results.add(aggResults.get(index));
                     index++;
                     return index < aggResults.size();
@@ -343,6 +345,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 public long getMaxResultSize() {
                 	return s.getMaxResultSize();
                 }
+
+                @Override
+                public int getBatch() {
+                    return s.getBatch();
+                }
             };
         }
 
@@ -350,22 +357,22 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         public long size() {
             return aggregateMap.size();
         }
-        
+
     }
     private static final class GroupByCacheFactory {
         public static final GroupByCacheFactory INSTANCE = new GroupByCacheFactory();
-        
+
         private GroupByCacheFactory() {
         }
-        
+
         GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             Configuration conf = env.getConfiguration();
             boolean spillableEnabled =
                     conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
             if (spillableEnabled) {
                 return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals);
-            } 
-            
+            }
+
             return new InMemoryGroupByCache(env, tenantId, customAnnotations, aggregators, estDistVals);
         }
     }
@@ -388,14 +395,14 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
         if (estDistValsBytes != null) {
             // Allocate 1.5x estimation
-            estDistVals = Math.max(MIN_DISTINCT_VALUES, 
+            estDistVals = Math.max(MIN_DISTINCT_VALUES,
                             (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
         }
 
         final boolean spillableEnabled =
                 conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
 
-        GroupByCache groupByCache = 
+        GroupByCache groupByCache =
                 GroupByCacheFactory.INSTANCE.newCache(
                         env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan),
                         aggregators, estDistVals);
@@ -453,7 +460,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
      * Used for an aggregate query in which the key order match the group by key order. In this
      * case, we can do the aggregation as we scan, by detecting when the group by key changes.
      * @param limit TODO
-     * @throws IOException 
+     * @throws IOException
      */
     private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
             final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
@@ -559,11 +566,15 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 currentKey = null;
                 return false;
             }
-            
+
             @Override
             public long getMaxResultSize() {
                 return scanner.getMaxResultSize();
             }
+            @Override
+            public int getBatch() {
+                return scanner.getBatch();
+            }
         };
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index cdfc771..1e34d96 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.HashCache;
@@ -48,7 +49,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.TupleUtil;
 
 public class HashJoinRegionScanner implements RegionScanner {
-    
+
     private final RegionScanner scanner;
     private final TupleProjector projector;
     private final HashJoinInfo joinInfo;
@@ -60,7 +61,7 @@ public class HashJoinRegionScanner implements RegionScanner {
     private List<Tuple>[] tempTuples;
     private ValueBitSet tempDestBitSet;
     private ValueBitSet[] tempSrcBitSet;
-    
+
     @SuppressWarnings("unchecked")
     public HashJoinRegionScanner(RegionScanner scanner, TupleProjector projector, HashJoinInfo joinInfo, ImmutableBytesWritable tenantId, RegionCoprocessorEnvironment env) throws IOException {
         this.scanner = scanner;
@@ -92,8 +93,8 @@ public class HashJoinRegionScanner implements RegionScanner {
             }
             HashCache hashCache = (HashCache)cache.getServerCache(joinId);
             if (hashCache == null)
-                throw new DoNotRetryIOException("Could not find hash cache for joinId: " 
-                        + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength()) 
+                throw new DoNotRetryIOException("Could not find hash cache for joinId: "
+                        + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength())
                         + ". The cache might have expired and have been removed.");
             hashCaches[i] = hashCache;
             tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]);
@@ -103,18 +104,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             this.projector.setValueBitSet(tempDestBitSet);
         }
     }
-    
+
     private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
         if (result.isEmpty())
             return;
-        
+
         Tuple tuple = new ResultTuple(Result.create(result));
         // For backward compatibility. In new versions, HashJoinInfo.forceProjection()
         // always returns true.
         if (joinInfo.forceProjection()) {
             tuple = projector.projectResults(tuple);
         }
-        
+
+        // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well.
         if (hasBatchLimit)
             throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
 
@@ -157,7 +159,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                         Tuple lhs = resultQueue.poll();
                         if (!earlyEvaluation) {
                             ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
-                            tempTuples[i] = hashCaches[i].get(key);                        	
+                            tempTuples[i] = hashCaches[i].get(key);
                             if (tempTuples[i] == null) {
                                 if (type == JoinType.Inner || type == JoinType.Semi) {
                                     continue;
@@ -171,7 +173,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                             Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ?
                                     lhs : TupleProjector.mergeProjectedValue(
                                             (ProjectedValueTuple) lhs, schema, tempDestBitSet,
-                                            null, joinInfo.getSchemas()[i], tempSrcBitSet[i], 
+                                            null, joinInfo.getSchemas()[i], tempSrcBitSet[i],
                                             joinInfo.getFieldPositions()[i]);
                             resultQueue.offer(joined);
                             continue;
@@ -180,7 +182,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                             Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ?
                                     lhs : TupleProjector.mergeProjectedValue(
                                             (ProjectedValueTuple) lhs, schema, tempDestBitSet,
-                                            t, joinInfo.getSchemas()[i], tempSrcBitSet[i], 
+                                            t, joinInfo.getSchemas()[i], tempSrcBitSet[i],
                                             joinInfo.getFieldPositions()[i]);
                             resultQueue.offer(joined);
                         }
@@ -211,18 +213,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             }
         }
     }
-    
+
     private boolean shouldAdvance() {
         if (!resultQueue.isEmpty())
             return false;
-        
+
         return hasMore;
     }
-    
+
     private boolean nextInQueue(List<Cell> results) {
-        if (resultQueue.isEmpty())
+        if (resultQueue.isEmpty()) {
             return false;
-        
+        }
+
         Tuple tuple = resultQueue.poll();
         for (int i = 0; i < tuple.size(); i++) {
             results.add(tuple.getValue(i));
@@ -252,19 +255,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             processResults(result, false);
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
     @Override
-    public boolean nextRaw(List<Cell> result, int limit)
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
             throws IOException {
         while (shouldAdvance()) {
-            hasMore = scanner.nextRaw(result, limit);
-            processResults(result, true);
+            hasMore = scanner.nextRaw(result, scannerContext);
+            processResults(result, false); // TODO fix honoring the limit
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
@@ -285,19 +288,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             processResults(result, false);
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
         while (shouldAdvance()) {
-            hasMore = scanner.next(result, limit);
-            processResults(result, true);
+            hasMore = scanner.next(result, scannerContext);
+            processResults(result, false); // TODO fix honoring the limit
             result.clear();
         }
-        
-        return nextInQueue(result);
+
+        return nextInQueue(result);
     }
 
     @Override
@@ -305,5 +308,10 @@ public class HashJoinRegionScanner implements RegionScanner {
         return this.scanner.getMaxResultSize();
     }
 
+    @Override
+    public int getBatch() {
+        return this.scanner.getBatch();
+    }
+
 }
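
For readers tracking the HBase 1.1 API migration in the hunks above: the
RegionScanner overloads that took an int limit were replaced by overloads
taking a ScannerContext, and getBatch() was added to the interface. A minimal
delegating wrapper in the new shape looks roughly like the sketch below;
DelegatingScanner is an illustrative name, not a Phoenix class, and the
remaining RegionScanner methods are left abstract rather than shown here.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;
    import org.apache.hadoop.hbase.regionserver.ScannerContext;

    // Sketch only; not part of the Phoenix code base.
    public abstract class DelegatingScanner implements RegionScanner {
        protected final RegionScanner delegate;

        protected DelegatingScanner(RegionScanner delegate) {
            this.delegate = delegate;
        }

        @Override
        public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
            // HBase 1.1 carries limit/size bookkeeping in ScannerContext
            // instead of a raw int limit.
            return delegate.nextRaw(result, scannerContext);
        }

        @Override
        public int getBatch() {
            // New in the HBase 1.1 RegionScanner interface; the Phoenix
            // wrappers above simply delegate to the inner scanner.
            return delegate.getBatch();
        }
    }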
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 6f1d5ac..c40e3cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -69,20 +69,20 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
     protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
     private boolean enableRebuildIndex = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD;
     private long rebuildIndexTimeInterval = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL;
-  
+
     @Override
     public void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
             boolean abortRequested) {
         executor.shutdownNow();
         GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll();
     }
-    
+
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
-        // sleep a little bit to compensate time clock skew when SYSTEM.CATALOG moves 
+        // sleep a little bit to compensate for clock skew when SYSTEM.CATALOG moves
         // among region servers, because we rely on the server time of the RS that is hosting
         // SYSTEM.CATALOG
-        long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, 
+        long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
             QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
         try {
             if(sleepTime > 0) {
@@ -91,12 +91,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         } catch (InterruptedException ie) {
             Thread.currentThread().interrupt();
         }
-        enableRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, 
+        enableRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
             QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
-        rebuildIndexTimeInterval = env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, 
+        rebuildIndexTimeInterval = env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
             QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
     }
-    
+
 
     @Override
     public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
@@ -119,7 +119,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
             LOG.error("BuildIndexScheduleTask cannot start!", ex);
         }
     }
-    
+
     /**
     * Task runs periodically to build indexes whose INDEX_NEED_PARTIALLY_REBUILD is set to true
      *
@@ -133,7 +133,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) {
             this.env = env;
         }
-      
+
         private String getJdbcUrl() {
             String zkQuorum = this.env.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM);
             String zkClientPort = this.env.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT,
@@ -144,7 +144,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkClientPort
                 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkParentNode;
         }
-      
+
+        @Override
         public void run() {
             RegionScanner scanner = null;
             PhoenixConnection conn = null;
@@ -199,7 +200,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
                     if ((dataTable == null || dataTable.length == 0)
                             || (indexStat == null || indexStat.length == 0)
-                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0) 
+                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0)
                                     && (Bytes.compareTo(PIndexState.INACTIVE.getSerializedBytes(), indexStat) != 0))) {
                         // index has to be either in disable or inactive state
                         // data table name can't be empty
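
The BuildIndexScheduleTask above is driven by the ScheduledThreadPoolExecutor
declared at the top of the observer. A stripped-down sketch of that scheduling
pattern follows; the interval value and the task body are illustrative, the
real interval comes from QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
and the actual scheduling call lives in postOpen, outside this hunk.

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RebuildSchedulerSketch {
        public static void main(String[] args) {
            ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
            final long rebuildIndexTimeInterval = 10000L; // illustrative, in milliseconds
            executor.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    // Stand-in for BuildIndexScheduleTask.run(): scan SYSTEM.CATALOG
                    // for indexes in DISABLE or INACTIVE state and rebuild them.
                    System.out.println("checking for indexes to rebuild");
                }
            }, rebuildIndexTimeInterval, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS);
        }
    }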

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index ddde407..77e124d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -199,7 +199,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             indexMaintainer = indexMaintainers.get(0);
             viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
         }
-        
+
         final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
         final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
         innerScanner =
@@ -285,12 +285,12 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                 } finally {
                     try {
                         if(iterator != null) {
-                            iterator.close();    
+                            iterator.close();
                         }
                     } catch (SQLException e) {
                         ServerUtil.throwIOException(region.getRegionNameAsString(), e);
                     } finally {
-                        chunk.close();                
+                        chunk.close();
                     }
                 }
             }
@@ -299,6 +299,11 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             public long getMaxResultSize() {
                 return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+              return s.getBatch();
+            }
         };
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e43e5e5..2d6d98a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -101,8 +101,8 @@ import com.google.common.collect.Sets;
 
 /**
 * Region observer that aggregates ungrouped rows (i.e. SQL queries with an aggregate function and no GROUP BY).
- * 
- * 
+ *
+ *
  * @since 0.1
  */
 public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
@@ -116,7 +116,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     public static final String EMPTY_CF = "EmptyCF";
     private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
     private KeyValueBuilder kvBuilder;
-    
+
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
         super.start(e);
@@ -139,14 +139,14 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     public static void serializeIntoScan(Scan scan) {
         scan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG, QueryConstants.TRUE);
     }
-    
+
     @Override
     public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s)
             throws IOException {
         s = super.preScannerOpen(e, scan, s);
         if (ScanUtil.isAnalyzeTable(scan)) {
             // We are setting the start row and stop row such that it covers the entire region. As part
-            // of Phonenix-1263 we are storing the guideposts against the physical table rather than 
+            // of PHOENIX-1263 we are storing the guideposts against the physical table rather than
             // individual tenant specific tables.
             scan.setStartRow(HConstants.EMPTY_START_ROW);
             scan.setStopRow(HConstants.EMPTY_END_ROW);
@@ -154,7 +154,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         return s;
     }
-    
+
     @Override
     protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
         int offset = 0;
@@ -179,9 +179,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
-        
+
         RegionScanner theScanner = s;
-        
+
         byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
         PTable projectedTable = null;
         List<Expression> selectExpressions = null;
@@ -226,14 +226,14 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             }
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             theScanner =
-                    getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, 
+                    getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
                             dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-        } 
-        
+        }
+
         if (j != null)  {
             theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
         }
-        
+
         int batchSize = 0;
         List<Mutation> mutations = Collections.emptyList();
         boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
@@ -330,7 +330,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                                         }
                                         column.getDataType().coerceBytes(ptr, value,
                                             expression.getDataType(), expression.getMaxLength(),
-                                            expression.getScale(), expression.getSortOrder(), 
+                                            expression.getScale(), expression.getSortOrder(),
                                             column.getMaxLength(), column.getScale(),
                                             column.getSortOrder());
                                         byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
@@ -418,7 +418,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 }
             }
         }
-        
+
         if (logger.isDebugEnabled()) {
         	logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
         }
@@ -438,7 +438,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
         }
         final KeyValue aggKeyValue = keyValue;
-        
+
         RegionScanner scanner = new BaseRegionScanner() {
             private boolean done = !hadAny;
 
@@ -464,11 +464,16 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 results.add(aggKeyValue);
                 return false;
             }
-            
+
             @Override
             public long getMaxResultSize() {
             	return scan.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return innerScanner.getBatch();
+            }
         };
         return scanner;
     }
@@ -496,7 +501,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         indexMutations.clear();
     }
-    
+
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
         final Store store, InternalScanner scanner, final ScanType scanType)
@@ -505,8 +510,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         InternalScanner internalScanner = scanner;
         if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
             try {
-                boolean useCurrentTime = 
-                        c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+                boolean useCurrentTime =
+                        c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
                                 QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
                 // Provides a means of clients controlling their timestamps to not use current time
                 // when background tasks are updating stats. Instead we track the max timestamp of
@@ -526,8 +531,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         return internalScanner;
     }
-    
-    
+
+
     @Override
     public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r)
             throws IOException {
@@ -535,8 +540,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         TableName table = region.getRegionInfo().getTable();
         StatisticsCollector stats = null;
         try {
-            boolean useCurrentTime = 
-                    e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+            boolean useCurrentTime =
+                    e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
                             QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
             // Provides a means of clients controlling their timestamps to not use current time
             // when background tasks are updating stats. Instead we track the max timestamp of
@@ -544,7 +549,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
             stats = new StatisticsCollector(e.getEnvironment(), table.getNameAsString(), clientTimeStamp);
             stats.splitStats(region, l, r);
-        } catch (IOException ioe) { 
+        } catch (IOException ioe) {
             if(logger.isWarnEnabled()) {
                 logger.warn("Error while collecting stats during split for " + table,ioe);
             }
@@ -559,7 +564,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             return PTableImpl.createFromProto(ptableProto);
         } catch (IOException e) {
             throw new RuntimeException(e);
-        } 
+        }
     }
 
     private static List<Expression> deserializeExpressions(byte[] b) {
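
Both preCompact and postSplit above consult STATS_USE_CURRENT_TIME_ATTRIB so
that clients which control their own timestamps are not disturbed by
background stats updates. Opting out looks like the fragment below; the key
is defined in QueryServices, and the rest is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.query.QueryServices;

    // Fragment: tell stats collection not to stamp rows with current time.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, false);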

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 3469042..71cc1d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,4 +70,4 @@ public class LocalTable implements LocalHBaseState {
     scanner.close();
     return r;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
index 68555ef..d39b01d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
@@ -58,14 +58,14 @@ public class FamilyOnlyFilter extends FamilyFilter {
   @Override
   public ReturnCode filterKeyValue(Cell v) {
     if (done) {
-      return ReturnCode.SKIP;
+      return ReturnCode.NEXT_ROW;
     }
     ReturnCode code = super.filterKeyValue(v);
     if (previousMatchFound) {
       // we found a match before, and now we are skipping the key because of the family, therefore
       // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
+      if (code.equals(ReturnCode.SKIP) || code.equals(ReturnCode.NEXT_ROW)) {
+        done = true;
       }
     } else {
       // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
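
The SKIP-to-NEXT_ROW switch follows HBase 1.1 filter semantics: SKIP rejects a
single cell, while NEXT_ROW abandons the rest of the current row. Because cells
are sorted by family within a row, nothing after a family mismatch can match
again, so NEXT_ROW is both correct and cheaper. A fragment mirroring the
updated tests further below (names and values are illustrative):

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
    import org.apache.hadoop.hbase.util.Bytes;

    // Fragment only; mirrors TestFamilyOnlyFilter below.
    FamilyOnlyFilter filter = new FamilyOnlyFilter(Bytes.toBytes("fam"));
    KeyValue match = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
        Bytes.toBytes("q"), 10, Bytes.toBytes("val"));
    KeyValue other = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam2"),
        Bytes.toBytes("q"), 10, Bytes.toBytes("val"));
    assert filter.filterKeyValue(match) == ReturnCode.INCLUDE;  // target family passes
    assert filter.filterKeyValue(other) == ReturnCode.NEXT_ROW; // past the family: skip the row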

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index e225696..435a1c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -57,7 +57,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     /**
     * Same as {@link KeyValueScanner#next()} except that we filter out the next {@link KeyValue} until we find one that
      * passes the filter.
-     * 
+     *
      * @return the next {@link KeyValue} or <tt>null</tt> if no next {@link KeyValue} is present and passes all the
      *         filters.
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index b89c807..b5e6a63 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -59,14 +59,14 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
             Mutation m = miniBatchOp.getOperation(i);
             keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
             List<IndexMaintainer> indexMaintainers = getCodec().getIndexMaintainers(m.getAttributesMap());
-            
+
             for(IndexMaintainer indexMaintainer: indexMaintainers) {
                 if (indexMaintainer.isImmutableRows() && indexMaintainer.isLocalIndex()) continue;
                 indexTableName.set(indexMaintainer.getIndexTableName());
                 if (maintainers.get(indexTableName) != null) continue;
                 maintainers.put(indexTableName, indexMaintainer);
             }
-            
+
         }
         if (maintainers.isEmpty()) return;
         Scan scan = IndexManagementUtil.newLocalStateScan(new ArrayList<IndexMaintainer>(maintainers.values()));
@@ -100,7 +100,7 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
     private PhoenixIndexCodec getCodec() {
         return (PhoenixIndexCodec)this.codec;
     }
-    
+
     @Override
     public byte[] getBatchId(Mutation m){
         return this.codec.getBatchId(m);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
index 88e141a..52fbe9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
@@ -31,15 +31,15 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class RegionScannerResultIterator extends BaseResultIterator {
     private final RegionScanner scanner;
-    
+
     public RegionScannerResultIterator(RegionScanner scanner) {
         this.scanner = scanner;
     }
-    
+
     @Override
     public Tuple next() throws SQLException {
-        // XXX: No access here to the region instance to enclose this with startRegionOperation / 
-        // stopRegionOperation 
+        // XXX: No access here to the region instance to enclose this with startRegionOperation /
+        // stopRegionOperation
         synchronized (scanner) {
             try {
                 // TODO: size
@@ -48,6 +48,7 @@ public class RegionScannerResultIterator extends BaseResultIterator {
                 // since this is an indication of whether or not there are more values after the
                 // ones returned
                 boolean hasMore = scanner.nextRaw(results);
+
                 if (!hasMore && results.isEmpty()) {
                     return null;
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index de59304..0e50923 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
@@ -58,15 +59,15 @@ public class StatisticsScanner implements InternalScanner {
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
-        boolean ret = delegate.next(result, limit);
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        boolean ret = delegate.next(result, scannerContext);
         updateStat(result);
         return ret;
     }
 
     /**
     * Update the current statistics based on the latest batch of key-values from the underlying scanner
-     * 
+     *
      * @param results
      *            next batch of {@link KeyValue}s
      */
@@ -122,4 +123,5 @@ public class StatisticsScanner implements InternalScanner {
             }
         }
     }
-}
\ No newline at end of file
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index 12f1863..030b114 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.ipc.RpcScheduler.Context;
+import org.apache.hadoop.hbase.ipc.RpcServer.Connection;
 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -86,11 +87,12 @@ public class PhoenixIndexRpcSchedulerTest {
     }
 
     private void dispatchCallWithPriority(RpcScheduler scheduler, int priority) throws Exception {
+        Connection connection = Mockito.mock(Connection.class);
         CallRunner task = Mockito.mock(CallRunner.class);
         RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build();
         RpcServer server = new RpcServer(null, "test-rpcserver", null, isa, conf, scheduler);
         RpcServer.Call call =
-                server.new Call(0, null, null, header, null, null, null, null, 10, null);
+                server.new Call(0, null, null, header, null, null, connection, null, 10, null, null);
         Mockito.when(task.getCall()).thenReturn(call);
 
         scheduler.dispatch(task);
@@ -98,4 +100,4 @@ public class PhoenixIndexRpcSchedulerTest {
         Mockito.verify(task).getCall();
         Mockito.verifyNoMoreInteractions(task);
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index 54db5d8..e996b23 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -37,7 +37,6 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-
 import org.apache.phoenix.hbase.index.covered.IndexUpdate;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
index 216f548..808e6bc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
@@ -47,7 +47,7 @@ public class TestFamilyOnlyFilter {
 
     kv = new KeyValue(row, fam2, qual, 10, val);
     code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
+    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
   }
 
   @Test
@@ -61,7 +61,7 @@ public class TestFamilyOnlyFilter {
     KeyValue kv = new KeyValue(row, fam, qual, 10, val);
 
     ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
+    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
 
     kv = new KeyValue(row, fam2, qual, 10, val);
     code = filter.filterKeyValue(kv);
@@ -69,7 +69,7 @@ public class TestFamilyOnlyFilter {
 
     kv = new KeyValue(row, fam3, qual, 10, val);
     code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
+    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
   }
 
   @Test
@@ -83,7 +83,7 @@ public class TestFamilyOnlyFilter {
     KeyValue kv = new KeyValue(row, fam, qual, 10, val);
 
     ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
+    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
 
     KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
     code = filter.filterKeyValue(accept);
@@ -91,12 +91,12 @@ public class TestFamilyOnlyFilter {
 
     kv = new KeyValue(row, fam3, qual, 10, val);
     code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
+    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
 
     // we shouldn't match the family again - everything after a switched family should be ignored
     code = filter.filterKeyValue(accept);
     assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
+      ReturnCode.NEXT_ROW, code);
 
     // reset the filter and we should accept it again
     filter.reset();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 60c11d7..ae577bd 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -317,9 +317,9 @@ public class TestWALRecoveryCaching {
         }
 
         LOG.info("Starting region server:" + server.getHostname());
-        cluster.startRegionServer(server.getHostname());
+        cluster.startRegionServer(server.getHostname(), server.getPort());
 
-        cluster.waitForRegionServerToStart(server.getHostname(), TIMEOUT);
+        cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);
 
         // start a server to get back to the base number of servers
         LOG.info("STarting server to replace " + server);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 7ed0801..b2b9a47 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -85,7 +85,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-testing-util</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
       <optional>true</optional>
       <exclusions>
@@ -98,7 +97,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
@@ -111,41 +109,34 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 2db1af6..015a660 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -54,7 +54,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-testing-util</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
       <optional>true</optional>
       <exclusions>
@@ -67,7 +66,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
@@ -80,41 +78,56 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
-      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
-      <version>${hbase.version}</version>
+    </dependency>
+   <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>


[04/31] phoenix git commit: PHOENIX-1703 Fail connection when server minor version is less than client minor version

Posted by ap...@apache.org.
PHOENIX-1703 Fail connection when server minor version is less than client minor version


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4bc162d8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4bc162d8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4bc162d8

Branch: refs/heads/4.x-HBase-1.x
Commit: 4bc162d8f254d01bfff71d429a124e5c2d146054
Parents: a29e163
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Mar 19 22:23:20 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Mar 21 11:10:04 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/util/MetaDataUtil.java   | 22 +++++++++++++-------
 .../apache/phoenix/util/MetaDataUtilTest.java   | 18 +++++++++-------
 2 files changed, 24 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4bc162d8/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index c1aa2cc..f916f5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -49,16 +49,16 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.types.PSmallint;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,21 +79,22 @@ public class MetaDataUtil {
     public static final String PARENT_TABLE_KEY = "PARENT_TABLE";
     public static final byte[] PARENT_TABLE_KEY_BYTES = Bytes.toBytes("PARENT_TABLE");
     
-    public static boolean areClientAndServerCompatible(long version) {
+    public static boolean areClientAndServerCompatible(long serverHBaseAndPhoenixVersion) {
         // As of 3.0, we allow a client and server to differ in the minor version.
         // Care has to be taken to upgrade the server before the client, as otherwise
         // the client may call expressions that don't yet exist on the server.
         // Differing by the patch version has always been allowed.
         // Only differing by the major version is not allowed.
-        return areClientAndServerCompatible(MetaDataUtil.decodePhoenixVersion(version), MetaDataProtocol.PHOENIX_MAJOR_VERSION);
+        return areClientAndServerCompatible(MetaDataUtil.decodePhoenixVersion(serverHBaseAndPhoenixVersion), MetaDataProtocol.PHOENIX_MAJOR_VERSION, MetaDataProtocol.PHOENIX_MINOR_VERSION);
     }
 
     // Default scope for testing
-    static boolean areClientAndServerCompatible(int version, int pMajor) {
+    static boolean areClientAndServerCompatible(int serverVersion, int clientMajorVersion, int clientMinorVersion) {
         // A server and client with the same major and minor version number must be compatible.
         // So it's important that we roll the PHOENIX_MAJOR_VERSION or PHOENIX_MINOR_VERSION
         // when we make an incompatible change.
-        return VersionUtil.encodeMaxMinorVersion(pMajor) >= version && VersionUtil.encodeMinMinorVersion(pMajor) <= version;
+        return VersionUtil.encodeMinPatchVersion(clientMajorVersion, clientMinorVersion) <= serverVersion && // Client major and minor cannot be ahead of the server
+                VersionUtil.encodeMaxMinorVersion(clientMajorVersion) >= serverVersion; // Server cannot be on a newer major version than the client
     }
 
     // Given the encoded integer representing the phoenix version in the encoded version value.
@@ -129,6 +130,11 @@ public class MetaDataUtil {
         return major + "." + minor + "." + patch;
     }
 
+    public static int encodePhoenixVersion() {
+        return VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, MetaDataProtocol.PHOENIX_MINOR_VERSION,
+                MetaDataProtocol.PHOENIX_PATCH_NUMBER);
+    }
+
     public static long encodeHBaseAndPhoenixVersions(String hbaseVersion) {
         return (((long) VersionUtil.encodeVersion(hbaseVersion)) << (Byte.SIZE * 5)) |
                 (((long) VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, MetaDataProtocol.PHOENIX_MINOR_VERSION,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4bc162d8/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
index 427b3bf..9597b9c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
@@ -45,14 +45,16 @@ public class MetaDataUtilTest {
     
     @Test
     public void testCompatibility() {
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,1), 1));
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,10), 1));
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,0), 1));
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,255), 1));
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,2,0), 2));
-        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,10,36), 2));
-        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 4));
-        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 2));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,1), 1, 2));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,10), 1, 1));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,0), 1, 2));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,255), 1, 2));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,2,0), 2, 0));
+        assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,10,36), 2, 9));
+        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 4, 0));
+        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 2, 0));
+        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 2));
+        assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 5));
     }
 
   /**
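
In short, the new check requires both that the client's major.minor not be
ahead of the server and that the server not be on a newer major version than
the client. The fragment below mirrors the updated tests and assumes it runs
from the same package as MetaDataUtil, since areClientAndServerCompatible(int,
int, int) is default scope.

    import org.apache.phoenix.hbase.index.util.VersionUtil;

    // Fragment mirroring the updated tests; values are illustrative.
    int server431 = VersionUtil.encodeVersion(4, 3, 1); // server on 4.3.1
    // Client 4.4 against a 4.3.1 server: client minor is ahead -> incompatible.
    boolean a = MetaDataUtil.areClientAndServerCompatible(server431, 4, 4); // false
    // Client 4.3 against the same server: patch may differ -> compatible.
    boolean b = MetaDataUtil.areClientAndServerCompatible(server431, 4, 3); // true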


[22/31] phoenix git commit: PHOENIX-1763 Support building with HBase-1.1.0 (Enis Soztutar)

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index c2ff589..31a53c3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -76,7 +76,7 @@
     <test.output.tofile>true</test.output.tofile>
 
     <!-- Hadoop Versions -->
-    <hbase.version>1.0.1-SNAPSHOT</hbase.version>
+    <hbase.version>1.1.0-SNAPSHOT</hbase.version>
     <hadoop-two.version>2.5.1</hadoop-two.version>
 
     <!-- Dependency versions -->
@@ -430,6 +430,11 @@
       <!-- HBase dependencies -->
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-annotations</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-testing-util</artifactId>
         <version>${hbase.version}</version>
         <scope>test</scope>
@@ -453,13 +458,34 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-common</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-client</artifactId>
         <version>${hbase.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-client</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-server</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-server</artifactId>
         <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
@@ -473,6 +499,26 @@
         <type>test-jar</type>
         <scope>test</scope>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop2-compat</artifactId>
+        <version>${hbase.version}</version>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop2-compat</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-it</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
 
       <!-- Hadoop Dependencies -->
       <dependency>


[27/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/33cb45d0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/33cb45d0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/33cb45d0

Branch: refs/heads/4.3
Commit: 33cb45d0ea53f0155824a84fc3ca6243ace9ecef
Parents: 52d1833
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:54:13 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/33cb45d0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/33cb45d0/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}

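For reference, the deleted assertions pinned down a small state machine: the filter
includes cells of exactly one target family, skips everything else, starts skipping
permanently once the scan has moved past a match, and only re-admits the family after
reset(). A minimal filter with that behavior might look like the sketch below
(hypothetical code, not the removed FamilyOnlyFilter source):

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.filter.FilterBase;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SingleFamilyFilter extends FilterBase {
        private final byte[] family;
        private boolean seenMatch = false;     // the target family has been observed
        private boolean doneMatching = false;  // the scan moved past the target family

        public SingleFamilyFilter(byte[] family) {
            this.family = family;
        }

        @Override
        public ReturnCode filterKeyValue(Cell cell) throws IOException {
            if (doneMatching) {
                return ReturnCode.SKIP;  // out-of-order "matches" are ignored
            }
            if (Bytes.equals(CellUtil.cloneFamily(cell), family)) {
                seenMatch = true;
                return ReturnCode.INCLUDE;
            }
            if (seenMatch) {
                doneMatching = true;     // the family switched after a match
            }
            return ReturnCode.SKIP;
        }

        @Override
        public void reset() throws IOException {
            seenMatch = false;           // per-row state cleared, as testResetFilter verified
            doneMatching = false;
        }
    }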

[07/31] phoenix git commit: PHOENIX-1676 Set priority of Index Updates correctly

Posted by ap...@apache.org.
PHOENIX-1676 Set priority of Index Updates correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b0591ec
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b0591ec
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b0591ec

Branch: refs/heads/4.x-HBase-1.x
Commit: 8b0591ecd38ffa6a110f9fd5d9c8ce086d537e2c
Parents: 096586e
Author: Thomas <td...@salesforce.com>
Authored: Mon Mar 23 22:17:16 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Mon Mar 23 22:40:55 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/IndexQosIT.java       | 240 +++++++++++++++++++
 .../hbase/ipc/PhoenixIndexRpcScheduler.java     |   3 +
 .../phoenix/hbase/index/IndexQosCompat.java     |  98 --------
 .../index/IndexQosRpcControllerFactory.java     |  12 +-
 .../index/table/CoprocessorHTableFactory.java   |  20 --
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   7 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   7 +
 7 files changed, 260 insertions(+), 127 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
new file mode 100644
index 0000000..7338b40
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
+import static org.apache.phoenix.util.TestUtil.LOCALHOST;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.RpcExecutor;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
+import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
+import org.apache.phoenix.jdbc.PhoenixTestDriver;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class IndexQosIT extends BaseTest {
+
+    private static final String SCHEMA_NAME = "S";
+    private static final String INDEX_TABLE_NAME = "I";
+    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+    private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
+    private static final int NUM_SLAVES = 2;
+
+    private static String url;
+    private static PhoenixTestDriver driver;
+    private HBaseTestingUtility util;
+    private HBaseAdmin admin;
+    private Configuration conf;
+    private static RpcExecutor spyRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-queue", 30, 1, 300));
+
+    /**
+     * Factory that uses a spied RpcExecutor
+     */
+    public static class TestPhoenixIndexRpcSchedulerFactory extends PhoenixIndexRpcSchedulerFactory {
+        @Override
+        public RpcScheduler create(Configuration conf, RegionServerServices services) {
+            PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = (PhoenixIndexRpcScheduler)super.create(conf, services);
+            phoenixIndexRpcScheduler.setExecutorForTesting(spyRpcExecutor);
+            return phoenixIndexRpcScheduler;
+        }
+    }
+
+    @Before
+    public void doSetup() throws Exception {
+        conf = HBaseConfiguration.create();
+        setUpConfigForMiniCluster(conf);
+        conf.set(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, IndexQosRpcControllerFactory.class.getName());
+        util = new HBaseTestingUtility(conf);
+        // start cluster with 2 region servers
+        util.startMiniCluster(NUM_SLAVES);
+        admin = util.getHBaseAdmin();
+        String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
+        url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
+                + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
+        driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        try {
+            destroyDriver(driver);
+            if (admin!=null) {
+            	admin.close();
+            }
+        } finally {
+            util.shutdownMiniCluster();
+        }
+    }
+    
+    @Test
+    public void testIndexWriteQos() throws Exception { 
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(url, props);
+
+        // create the table 
+        conn.createStatement().execute(
+                "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+
+        // create the index 
+        conn.createStatement().execute(
+                "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
+
+        byte[] dataTableName = Bytes.toBytes(DATA_TABLE_FULL_NAME);
+        byte[] indexTableName = Bytes.toBytes(INDEX_TABLE_FULL_NAME);
+        MiniHBaseCluster cluster = util.getHBaseCluster();
+        HMaster master = cluster.getMaster();
+        AssignmentManager am = master.getAssignmentManager();
+
+        // verify there is only a single region for data table
+        List<HRegionInfo> tableRegions = admin.getTableRegions(dataTableName);
+        assertEquals("Expected single region for " + dataTableName, tableRegions.size(), 1);
+        HRegionInfo dataHri = tableRegions.get(0);
+
+        // verify there is only a single region for index table
+        tableRegions = admin.getTableRegions(indexTableName);
+        HRegionInfo indexHri = tableRegions.get(0);
+        assertEquals("Expected single region for " + indexTableName, tableRegions.size(), 1);
+
+        ServerName dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
+        ServerName indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
+
+        // if data table and index table are on same region server, move the index table to the other region server
+        if (dataServerName.equals(indexServerName)) {
+            HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
+            HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
+            HRegionServer dstServer = null;
+            HRegionServer srcServer = null;
+            if (server1.getServerName().equals(indexServerName)) {
+                dstServer = server2;
+                srcServer = server1;
+            } else {
+                dstServer = server1;
+                srcServer = server2;
+            }
+            byte[] encodedRegionNameInBytes = indexHri.getEncodedNameAsBytes();
+            admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
+            while (dstServer.getOnlineRegion(indexHri.getRegionName()) == null
+                    || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+                    || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+                    || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
+                // wait for the move to be finished
+                Thread.sleep(1);
+            }
+        }
+
+        dataHri = admin.getTableRegions(dataTableName).get(0);
+        dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
+        indexHri = admin.getTableRegions(indexTableName).get(0);
+        indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
+
+        // verify index and data tables are on different servers
+        assertNotEquals("Index and Data table should be on different region servers dataServer " + dataServerName
+                + " indexServer " + indexServerName, dataServerName, indexServerName);
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+        stmt.setString(1, "k1");
+        stmt.setString(2, "v1");
+        stmt.setString(3, "v2");
+        stmt.execute();
+        conn.commit();
+
+        // run select query that should use the index
+        String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
+        stmt = conn.prepareStatement(selectSql);
+        stmt.setString(1, "v1");
+
+        // verify that the query does a range scan on the index table
+        ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
+        assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
+
+        // verify that the correct results are returned
+        rs = stmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("k1", rs.getString(1));
+        assertEquals("v2", rs.getString(2));
+        assertFalse(rs.next());
+        
+        // drop index table 
+        conn.createStatement().execute(
+                "DROP INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME );
+        // create a data table with the same name as the index table 
+        conn.createStatement().execute(
+                "CREATE TABLE " + INDEX_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+        
+        // upsert one row to the table (which has the same table name as the previous index table)
+        stmt = conn.prepareStatement("UPSERT INTO " + INDEX_TABLE_FULL_NAME + " VALUES(?,?,?)");
+        stmt.setString(1, "k1");
+        stmt.setString(2, "v1");
+        stmt.setString(3, "v2");
+        stmt.execute();
+        conn.commit();
+        
+        // run select query on the new table
+        selectSql = "SELECT k, v2 from " + INDEX_TABLE_FULL_NAME + " WHERE v1=?";
+        stmt = conn.prepareStatement(selectSql);
+        stmt.setString(1, "v1");
+
+        // verify that the correct results are returned
+        rs = stmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("k1", rs.getString(1));
+        assertEquals("v2", rs.getString(2));
+        assertFalse(rs.next());
+        
+        // verify that the index queue is used only once (for the first upsert)
+        Mockito.verify(spyRpcExecutor).dispatch(Mockito.any(CallRunner.class));
+    }
+
+}

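The whole integration surface of the feature under test is the pair of configuration
keys that doSetup() installs. A minimal sketch of the same wiring outside the test
harness, using only the classes referenced in the diff above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;
    import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
    import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;

    public class IndexQosWiring {
        public static Configuration indexQosConf() {
            Configuration conf = HBaseConfiguration.create();
            // server side: scheduler factory that adds the dedicated index handler pool
            conf.set(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
                    PhoenixIndexRpcSchedulerFactory.class.getName());
            // client side: controller factory that tags index writes with the index priority
            conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    IndexQosRpcControllerFactory.class.getName());
            return conf;
        }
    }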
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
index 98002a7..4709304 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
@@ -43,6 +43,7 @@ public class PhoenixIndexRpcScheduler extends RpcScheduler {
     private int minPriority;
     private int maxPriority;
     private RpcExecutor callExecutor;
+    private int port;
 
     public PhoenixIndexRpcScheduler(int indexHandlerCount, Configuration conf,
             RpcScheduler delegate, int minPriority, int maxPriority) {
@@ -67,11 +68,13 @@ public class PhoenixIndexRpcScheduler extends RpcScheduler {
     @Override
     public void init(Context context) {
         delegate.init(context);
+        this.port = context.getListenerAddress().getPort();
     }
 
     @Override
     public void start() {
         delegate.start();
+        callExecutor.start(port);
     }
 
     @Override

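Read together with the new port field, the two one-line hunks complete the scheduler's
lifecycle: the wrapped delegate is initialized and started as before, and the dedicated
index call executor is now started as well, using the RPC listener port captured during
init(). The patched methods read as follows (reconstructed from the hunks above):

    @Override
    public void init(Context context) {
        delegate.init(context);
        this.port = context.getListenerAddress().getPort();  // remember the server's RPC port
    }

    @Override
    public void start() {
        delegate.start();
        callExecutor.start(port);  // previously the index executor was never started
    }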
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosCompat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosCompat.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosCompat.java
deleted file mode 100644
index 5681d71..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosCompat.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-
-/**
- * Helper class to avoid loading HBase 0.98.3+ classes in older HBase installations
- */
-public class IndexQosCompat {
-
-    private static final Log LOG = LogFactory.getLog(IndexQosCompat.class);
-
-    /**
-     * Full class name of the RpcControllerFactory. The name is copied here to avoid a static reference, so this code also works with older versions of HBase 0.98 that don't have the class
-     */
-    private static final String HBASE_RPC_CONTROLLER_CLASS_NAME =
-            "org.apache.hadoop.hbase.ipc.RpcControllerFactory";
-    private static volatile boolean checked = false;
-    private static boolean rpcControllerExists = false;
-
-    private IndexQosCompat() {
-        // private ctor for util class
-    }
-
-    /**
-     * @param tableName name of the index table
-     * @return configuration key for whether a table should have Index QOS writes (it's a target
-     *         index table)
-     */
-    public static String getTableIndexQosConfKey(String tableName) {
-        return "phoenix.index.table.qos._" + tableName;
-    }
-
-    /**
-     * Set the index rpc controller, if the rpc controller exists. No-op if the RpcController
-     * is not on the classpath.
-     * @param conf to update
-     */
-    public static void setPhoenixIndexRpcController(Configuration conf) {
-        if (rpcControllerExists()) {
-            // then we can load the class just fine
-            conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
-                PhoenixIndexRpcSchedulerFactory.class.getName());
-        }
-    }
-
-    private static boolean rpcControllerExists() {
-        if (checked) {
-            synchronized (IndexQosCompat.class) {
-                if (!checked) {
-                    // try loading the class
-                    try {
-                        Class.forName(HBASE_RPC_CONTROLLER_CLASS_NAME);
-                        rpcControllerExists = true;
-                    } catch (ClassNotFoundException e) {
-                        LOG.warn("RpcControllerFactory doesn't exist, not setting custom index handler properties.");
-                        rpcControllerExists = false;
-                    }
-
-                    checked = true;
-                }
-            }
-        }
-        return rpcControllerExists;
-    }
-
-    /**
-     * Ensure that the given table is enabled for index QOS handling
-     * @param conf configuration to read/update
-     * @param tableName name of the table to configure for index handlers
-     */
-    public static void enableIndexQosForTable(Configuration conf, String tableName) {
-        String confKey = IndexQosCompat.getTableIndexQosConfKey(tableName);
-        if (conf.get(confKey) == null) {
-            conf.setBoolean(confKey, true);
-        }
-    }
-}
\ No newline at end of file

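Incidentally, the removed lazy probe was broken: the outer guard reads "if (checked)"
where the double-checked idiom needs "if (!checked)", so the Class.forName() probe could
never run and rpcControllerExists() always returned false. The deletion makes the point
moot, but for reference a corrected version would look like this (sketch; both flags
made volatile for safe publication):

    private static volatile boolean checked = false;
    private static volatile boolean rpcControllerExists = false;

    private static boolean rpcControllerExists() {
        if (!checked) {                         // first check, without locking
            synchronized (IndexQosCompat.class) {
                if (!checked) {                 // re-check under the lock
                    try {
                        Class.forName(HBASE_RPC_CONTROLLER_CLASS_NAME);
                        rpcControllerExists = true;
                    } catch (ClassNotFoundException e) {
                        rpcControllerExists = false;
                    }
                    checked = true;             // publish only after the probe
                }
            }
        }
        return rpcControllerExists;
    }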
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
index aa8b8d1..a192feb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
+import org.apache.phoenix.util.SchemaUtil;
 
 /**
  * {@link RpcControllerFactory} that overrides the standard {@link PayloadCarryingRpcController} to
@@ -60,27 +61,22 @@ public class IndexQosRpcControllerFactory extends RpcControllerFactory {
 
     private class IndexQosRpcController extends DelegatingPayloadCarryingRpcController {
 
-        private Configuration conf;
         private int priority;
 
         public IndexQosRpcController(PayloadCarryingRpcController delegate, Configuration conf) {
             super(delegate);
-            this.conf = conf;
             this.priority = PhoenixIndexRpcSchedulerFactory.getMinPriority(conf);
         }
-
         @Override
         public void setPriority(final TableName tn) {
             // if its an index table, then we override to the index priority
-            if (isIndexTable(tn)) {
+            if (!tn.isSystemTable() &&  !SchemaUtil.isSystemDataTable(tn.getNameAsString())) {
                 setPriority(this.priority);
-            } else {
+            } else {
                 super.setPriority(tn);
             }
         }
 
-        private boolean isIndexTable(TableName tn) {
-            return conf.get(IndexQosCompat.getTableIndexQosConfKey(tn.getNameAsString())) == null;
-        }
     }
 }
\ No newline at end of file

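After this change the controller no longer consults the per-table configuration keys
that IndexQosCompat used to maintain; it simply elevates the priority of any table that
is neither an HBase system table nor a Phoenix system data table. A compact restatement
of that predicate (sketch built from the classes in the diff):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.phoenix.util.SchemaUtil;

    final class IndexPrioritySketch {
        // Mirrors IndexQosRpcController.setPriority: anything that is not a system
        // table is treated as an index write and gets the elevated index priority.
        static boolean usesIndexPriority(TableName tn) {
            return !tn.isSystemTable() && !SchemaUtil.isSystemDataTable(tn.getNameAsString());
        }
    }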
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
index 72a28be..ded618d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
@@ -19,21 +19,13 @@ package org.apache.phoenix.hbase.index.table;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.IndexQosCompat;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
 
 public class CoprocessorHTableFactory implements HTableFactory {
 
-    private static final Log LOG = LogFactory.getLog(CoprocessorHTableFactory.class);
     private CoprocessorEnvironment e;
 
     public CoprocessorHTableFactory(CoprocessorEnvironment e) {
@@ -42,18 +34,6 @@ public class CoprocessorHTableFactory implements HTableFactory {
 
     @Override
     public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
-        Configuration conf = e.getConfiguration();
-
-        // make sure we use the index priority writer for our rpcs
-        IndexQosCompat.setPhoenixIndexRpcController(conf);
-
-        // make sure we include the index table in the tables we need to track
-        String tableName = Bytes.toString(tablename.copyBytesIfNecessary());
-        IndexQosCompat.enableIndexQosForTable(conf, tableName);
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Creating new HTable: " + tableName);
-        }
         return this.e.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 154fef7..15bcfd0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -67,6 +67,7 @@ import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 
@@ -277,7 +278,11 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10");
     /** Version below which we fall back on the generic KeyValueBuilder */
     public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
-
+    
+    // list of system tables
+    public static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>().add(SYSTEM_CATALOG_NAME)
+            .add(SYSTEM_STATS_NAME).add(SEQUENCE_FULLNAME).build();
+    
     PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException {
         this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new PhoenixStatement(connection));
         this.connection = connection;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 2a1d3ff..4a8341d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -403,6 +403,13 @@ public class SchemaUtil {
         if (QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) return true;
         return false;
     }
+    
+    /**
+     * Returns true if the given table is a system table (does not include future system indexes)
+     */
+    public static boolean isSystemDataTable(String fullTableName) {
+    	return PhoenixDatabaseMetaData.SYSTEM_TABLE_NAMES.contains(fullTableName);
+    }
 
     // Given the splits and the rowKeySchema, find out the keys that 
     public static byte[][] processSplits(byte[][] splits, LinkedHashSet<PColumn> pkColumns, Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException {


[05/31] phoenix git commit: PHOENIX-1753 Query with RVC that doesn't lead with the row key can return incorrect results

Posted by ap...@apache.org.
PHOENIX-1753 Query with RVC that doesn't lead with the row key can return incorrect results


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e06ceaf4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e06ceaf4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e06ceaf4

Branch: refs/heads/4.x-HBase-1.x
Commit: e06ceaf455642d92b500d0e4edc343a3342a0d93
Parents: 4bc162d
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Mar 19 18:54:57 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Mar 21 11:13:24 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/RowValueConstructorIT.java  | 33 +++++++++++++++++++-
 .../apache/phoenix/compile/WhereOptimizer.java  |  4 +++
 .../phoenix/compile/WhereOptimizerTest.java     | 16 ++++++++++
 3 files changed, 52 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
index 8d67fa4..3859785 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
@@ -1362,6 +1362,37 @@ public class RowValueConstructorIT extends BaseClientManagedTimeIT {
         conn.close();
     }
 
-
+    @Test
+    public void testRVCWithRowKeyNotLeading() throws Exception {
+        String ddl = "CREATE TABLE sorttest4 (rownum BIGINT primary key, name varchar(16), age integer)";
+        Connection conn = nextConnection(getUrl());
+        conn.createStatement().execute(ddl);
+        conn.close();
+        conn = nextConnection(getUrl());
+        String dml = "UPSERT INTO sorttest4 (rownum, name, age) values (?, ?, ?)";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.setInt(1, 1);
+        stmt.setString(2, "A");
+        stmt.setInt(3, 1);
+        stmt.executeUpdate();
+        stmt.setInt(1, 2);
+        stmt.setString(2, "B");
+        stmt.setInt(3, 2);
+        stmt.executeUpdate();
+        conn.commit();
+        conn.close();
+        // the below query should only return one record -> (1, "A", 1)
+        String query = "SELECT rownum, name, age FROM sorttest4 where (age, rownum) < (2, 2)";
+        conn = nextConnection(getUrl());
+        ResultSet rs = conn.createStatement().executeQuery(query);
+        int numRecords = 0;
+        while (rs.next()) {
+            assertEquals(1, rs.getInt(1));
+            assertEquals("A", rs.getString(2));
+            assertEquals(1, rs.getInt(3));
+            numRecords++;
+        }
+        assertEquals(1, numRecords);
+    }
 
 }

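The expectation encoded in the test follows from row value constructor semantics:
tuples compare lexicographically, so (age, rownum) < (2, 2) admits row (1, 1) but
rejects row (2, 2). A standalone sketch of that comparison, in plain Java and
independent of Phoenix:

    public class RvcCompareSketch {
        public static void main(String[] args) {
            int[][] rows = { { 1, 1 }, { 2, 2 } };  // {age, rownum} of the two upserted rows
            for (int[] r : rows) {
                // lexicographic "<" over the two-element tuple, like the RVC in the query
                boolean lt = r[0] < 2 || (r[0] == 2 && r[1] < 2);
                System.out.println("(" + r[0] + "," + r[1] + ") < (2,2) -> " + lt);  // true, then false
            }
        }
    }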
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 713076e..b03793d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -542,6 +542,10 @@ public class WhereOptimizer {
                 int span = position - initialPosition;
                 return new SingleKeySlot(new RowValueConstructorKeyPart(table.getPKColumns().get(initialPosition), rvc, span, childSlots), initialPosition, span, EVERYTHING_RANGES);
             }
+            // If we don't clear the child list, we end up passing some of
+            // the child expressions of previous matches up the tree, causing
+            // those expressions to form the scan start/stop key. PHOENIX-1753
+            childSlots.clear();
             return null;
         }
 
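The one-line fix targets exactly the failure mode the new test reproduces: when an RVC
cannot be compiled into a key slot because it does not lead with the row key, the
visitor bails out with null, but the child expressions it had already collected used to
remain in childSlots and leak into the caller's scan start/stop key, wrongly pruning
rows. Clearing the list on the bail-out path forces the whole RVC to be evaluated as a
filter instead:

    // the bail-out path as it reads with the fix applied (fragment from the hunk above)
    childSlots.clear();  // discard partial matches so they cannot shape the start/stop key
    return null;         // the RVC falls back to filter evaluation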

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index 0ec6b45..94b25d0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -1275,6 +1275,22 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
     }
     
     @Test
+    public void testRVCExpressionWithNonFirstLeadingColOfRowKey() throws SQLException {
+        String old_value = "value";
+        String orgId = getOrganizationId();
+        
+        String query = "select * from entity_history where (old_value, organization_id) >= (?,?)";
+        List<Object> binds = Arrays.<Object>asList(old_value, orgId);
+        StatementContext context = compileStatement(query, binds);
+        Scan scan = context.getScan();
+        Filter filter = scan.getFilter();
+        assertNotNull(filter);
+        assertTrue(filter instanceof SingleKeyValueComparisonFilter);
+        assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow());
+        assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow());
+    }
+    
+    @Test
     public void testMultiRVCExpressionsCombinedWithAnd() throws SQLException {
         String lowerTenantId = "000000000000001";
         String lowerParentId = "000000000000002";


[08/31] phoenix git commit: PHOENIX-1676 Set priority of Index Updates correctly, fix IndexQosIT

Posted by ap...@apache.org.
PHOENIX-1676 Set priority of Index Updates correctly, fix IndexQosIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f4180fa4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f4180fa4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f4180fa4

Branch: refs/heads/4.x-HBase-1.x
Commit: f4180fa40e26c685bfbf1b59cf4385f9b0e713e9
Parents: 8b0591e
Author: Thomas <td...@salesforce.com>
Authored: Mon Mar 23 22:51:53 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Mon Mar 23 22:54:56 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/index/IndexQosIT.java | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4180fa4/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
index 7338b40..9558bcb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -36,13 +37,14 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
 import org.apache.hadoop.hbase.ipc.CallRunner;
 import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.RpcExecutor;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
@@ -82,8 +84,8 @@ public class IndexQosIT extends BaseTest {
      */
     public static class TestPhoenixIndexRpcSchedulerFactory extends PhoenixIndexRpcSchedulerFactory {
         @Override
-        public RpcScheduler create(Configuration conf, RegionServerServices services) {
-            PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = (PhoenixIndexRpcScheduler)super.create(conf, services);
+        public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
+            PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = (PhoenixIndexRpcScheduler)super.create(conf, priorityFunction, abortable);
             phoenixIndexRpcScheduler.setExecutorForTesting(spyRpcExecutor);
             return phoenixIndexRpcScheduler;
         }
@@ -93,7 +95,7 @@ public class IndexQosIT extends BaseTest {
     public void doSetup() throws Exception {
         conf = HBaseConfiguration.create();
         setUpConfigForMiniCluster(conf);
-        conf.set(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+        conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
                 TestPhoenixIndexRpcSchedulerFactory.class.getName());
         conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, IndexQosRpcControllerFactory.class.getName());
         util = new HBaseTestingUtility(conf);

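The compile fix tracks two HBase 1.x API moves visible in the hunks:
REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS migrated from HRegionServer to RSRpcServices,
and the RpcSchedulerFactory contract now passes a PriorityFunction and an Abortable
instead of the RegionServerServices handle. A factory written against the 1.x contract
therefore takes this shape (sketch using the classes from the diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.ipc.PriorityFunction;
    import org.apache.hadoop.hbase.ipc.RpcScheduler;
    import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;

    public class Hbase1xSchedulerFactory extends PhoenixIndexRpcSchedulerFactory {
        @Override
        public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable abortable) {
            // delegate to the Phoenix factory; test hooks (e.g. a spied executor) go here
            return super.create(conf, priority, abortable);
        }
    }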

[21/31] phoenix git commit: PHOENIX-1790 Fix test failures due to incorrect shadowing of @AfterClass methods. (Samarth Jain)

Posted by ap...@apache.org.
PHOENIX-1790 Fix test failures due to incorrect shadowing of @AfterClass methods. (Samarth Jain)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0d74cff2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0d74cff2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0d74cff2

Branch: refs/heads/4.x-HBase-1.x
Commit: 0d74cff2889aa259142091b830d7e181935e890d
Parents: fcedbe6
Author: Thomas <td...@salesforce.com>
Authored: Mon Mar 30 10:53:31 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Mon Mar 30 10:53:31 2015 -0700

----------------------------------------------------------------------
 .../src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java     | 2 +-
 .../src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d74cff2/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
index c079a30..deb14db 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -57,7 +57,7 @@ public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     @AfterClass
-    public static void doTeardown() throws Exception {
+    public static void cleanUpAfterTestSuite() throws Exception {
         TestPhoenixIndexRpcSchedulerFactory.reset();
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d74cff2/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index de0ab84..b04f636 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -76,7 +76,7 @@ public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     @AfterClass
-    public static void doTeardown() throws Exception {
+    public static void cleanUpAfterTestSuite() throws Exception {
         TestPhoenixIndexRpcSchedulerFactory.reset();
     }
     

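The rename is not cosmetic: JUnit collects @BeforeClass/@AfterClass methods up the
class hierarchy, but a static method in a subclass with the same name hides the
superclass method, so the base class teardown silently never ran. Giving the subclass
method a distinct name lets both run, base last (illustrative sketch; class names are
hypothetical):

    import org.junit.AfterClass;

    class BaseClusterIT {
        @AfterClass
        public static void doTeardown() { /* shuts the shared mini cluster down */ }
    }

    class SomeRpcIT extends BaseClusterIT {
        // A same-named static doTeardown() here would hide the base method and JUnit
        // would never invoke it; the distinct name lets both teardowns execute.
        @AfterClass
        public static void cleanUpAfterTestSuite() { /* resets the test scheduler factory */ }
    }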

[17/31] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

Posted by ap...@apache.org.
PHOENIX-1457 Use high priority queue for metadata endpoint calls


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f0c2ed4e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f0c2ed4e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f0c2ed4e

Branch: refs/heads/4.x-HBase-1.x
Commit: f0c2ed4e567eb4efc5a59d70d8880800b144fd09
Parents: 24ee2c6
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Tue Mar 24 17:17:44 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Fri Mar 27 11:54:40 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/IndexHandlerIT.java   |  12 +-
 .../phoenix/end2end/index/IndexQosIT.java       | 242 -------------------
 .../apache/phoenix/rpc/PhoenixClientRpcIT.java  | 122 ++++++++++
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 235 ++++++++++++++++++
 .../TestPhoenixIndexRpcSchedulerFactory.java    |  64 +++++
 .../hbase/ipc/PhoenixIndexRpcScheduler.java     | 123 ----------
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 123 ++++++++++
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java   |  95 ++++++++
 .../controller/ClientRpcControllerFactory.java  |  60 +++++
 .../ipc/controller/IndexRpcController.java      |  51 ++++
 .../ipc/controller/MetadataRpcController.java   |  55 +++++
 .../controller/ServerRpcControllerFactory.java  |  62 +++++
 .../index/IndexQosRpcControllerFactory.java     |  82 -------
 .../ipc/PhoenixIndexRpcSchedulerFactory.java    |  90 -------
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 -
 .../org/apache/phoenix/query/QueryServices.java |   5 +-
 .../phoenix/query/QueryServicesOptions.java     |  12 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   7 -
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  16 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java    | 106 --------
 .../PhoenixRpcSchedulerFactoryTest.java         | 125 ++++++++++
 .../java/org/apache/phoenix/query/BaseTest.java |  12 +-
 22 files changed, 1023 insertions(+), 680 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
index 1507d6b..20a780a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
 import org.apache.phoenix.hbase.index.TableName;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.junit.After;
@@ -53,11 +53,11 @@ public class IndexHandlerIT {
 
     public static class CountingIndexClientRpcFactory extends RpcControllerFactory {
 
-        private IndexQosRpcControllerFactory delegate;
+        private ServerRpcControllerFactory delegate;
 
         public CountingIndexClientRpcFactory(Configuration conf) {
             super(conf);
-            this.delegate = new IndexQosRpcControllerFactory(conf);
+            this.delegate = new ServerRpcControllerFactory(conf);
         }
 
         @Override
@@ -146,8 +146,8 @@ public class IndexHandlerIT {
         conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
             CountingIndexClientRpcFactory.class.getName());
         // and set the index table as the current table
-        conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
-            TestTable.getTableNameString());
+//        conf.setStrings(PhoenixRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
+//            TestTable.getTableNameString());
         HTable table = new HTable(conf, TestTable.getTableName());
 
         // do a write to the table
@@ -159,7 +159,7 @@ public class IndexHandlerIT {
         // check the counts on the rpc controller
         assertEquals("Didn't get the expected number of index priority writes!", 1,
             (int) CountingIndexClientRpcController.priorityCounts
-                    .get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));
+                    .get(QueryServicesOptions.DEFAULT_INDEX_PRIORITY));
 
         table.close();
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
deleted file mode 100644
index 9558bcb..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
- */
-package org.apache.phoenix.end2end.index;
-
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
-import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
-import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.RpcExecutor;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.jdbc.PhoenixTestDriver;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class IndexQosIT extends BaseTest {
-
-    private static final String SCHEMA_NAME = "S";
-    private static final String INDEX_TABLE_NAME = "I";
-    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
-    private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
-    private static final int NUM_SLAVES = 2;
-
-    private static String url;
-    private static PhoenixTestDriver driver;
-    private HBaseTestingUtility util;
-    private HBaseAdmin admin;
-    private Configuration conf;
-    private static RpcExecutor spyRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-queue", 30, 1, 300));
-
-    /**
-     * Factory that uses a spied RpcExecutor
-     */
-    public static class TestPhoenixIndexRpcSchedulerFactory extends PhoenixIndexRpcSchedulerFactory {
-        @Override
-        public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
-            PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = (PhoenixIndexRpcScheduler)super.create(conf, priorityFunction, abortable);
-            phoenixIndexRpcScheduler.setExecutorForTesting(spyRpcExecutor);
-            return phoenixIndexRpcScheduler;
-        }
-    }
-
-    @Before
-    public void doSetup() throws Exception {
-        conf = HBaseConfiguration.create();
-        setUpConfigForMiniCluster(conf);
-        conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                TestPhoenixIndexRpcSchedulerFactory.class.getName());
-        conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, IndexQosRpcControllerFactory.class.getName());
-        util = new HBaseTestingUtility(conf);
-        // start cluster with 2 region servers
-        util.startMiniCluster(NUM_SLAVES);
-        admin = util.getHBaseAdmin();
-        String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
-        url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
-                + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
-        driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        try {
-            destroyDriver(driver);
-            if (admin!=null) {
-            	admin.close();
-            }
-        } finally {
-            util.shutdownMiniCluster();
-        }
-    }
-    
-    @Test
-    public void testIndexWriteQos() throws Exception { 
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = driver.connect(url, props);
-
-        // create the table 
-        conn.createStatement().execute(
-                "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-
-        // create the index 
-        conn.createStatement().execute(
-                "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
-
-        byte[] dataTableName = Bytes.toBytes(DATA_TABLE_FULL_NAME);
-        byte[] indexTableName = Bytes.toBytes(INDEX_TABLE_FULL_NAME);
-        MiniHBaseCluster cluster = util.getHBaseCluster();
-        HMaster master = cluster.getMaster();
-        AssignmentManager am = master.getAssignmentManager();
-
-        // verify there is only a single region for data table
-        List<HRegionInfo> tableRegions = admin.getTableRegions(dataTableName);
-        assertEquals("Expected single region for " + dataTableName, tableRegions.size(), 1);
-        HRegionInfo dataHri = tableRegions.get(0);
-
-        // verify there is only a single region for index table
-        tableRegions = admin.getTableRegions(indexTableName);
-        HRegionInfo indexHri = tableRegions.get(0);
-        assertEquals("Expected single region for " + indexTableName, tableRegions.size(), 1);
-
-        ServerName dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
-        ServerName indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
-
-        // if data table and index table are on same region server, move the index table to the other region server
-        if (dataServerName.equals(indexServerName)) {
-            HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
-            HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
-            HRegionServer dstServer = null;
-            HRegionServer srcServer = null;
-            if (server1.getServerName().equals(indexServerName)) {
-                dstServer = server2;
-                srcServer = server1;
-            } else {
-                dstServer = server1;
-                srcServer = server2;
-            }
-            byte[] encodedRegionNameInBytes = indexHri.getEncodedNameAsBytes();
-            admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
-            while (dstServer.getOnlineRegion(indexHri.getRegionName()) == null
-                    || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
-                    || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
-                    || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
-                // wait for the move to be finished
-                Thread.sleep(1);
-            }
-        }
-
-        dataHri = admin.getTableRegions(dataTableName).get(0);
-        dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
-        indexHri = admin.getTableRegions(indexTableName).get(0);
-        indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
-
-        // verify index and data tables are on different servers
-        assertNotEquals("Index and Data table should be on different region servers dataServer " + dataServerName
-                + " indexServer " + indexServerName, dataServerName, indexServerName);
-
-        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "k1");
-        stmt.setString(2, "v1");
-        stmt.setString(3, "v2");
-        stmt.execute();
-        conn.commit();
-
-        // run select query that should use the index
-        String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
-        stmt = conn.prepareStatement(selectSql);
-        stmt.setString(1, "v1");
-
-        // verify that the query does a range scan on the index table
-        ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
-        assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
-
-        // verify that the correct results are returned
-        rs = stmt.executeQuery();
-        assertTrue(rs.next());
-        assertEquals("k1", rs.getString(1));
-        assertEquals("v2", rs.getString(2));
-        assertFalse(rs.next());
-        
-        // drop index table 
-        conn.createStatement().execute(
-                "DROP INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME );
-        // create a data table with the same name as the index table 
-        conn.createStatement().execute(
-                "CREATE TABLE " + INDEX_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-        
-        // upsert one row to the table (which has the same table name as the previous index table)
-        stmt = conn.prepareStatement("UPSERT INTO " + INDEX_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "k1");
-        stmt.setString(2, "v1");
-        stmt.setString(3, "v2");
-        stmt.execute();
-        conn.commit();
-        
-        // run select query on the new table
-        selectSql = "SELECT k, v2 from " + INDEX_TABLE_FULL_NAME + " WHERE v1=?";
-        stmt = conn.prepareStatement(selectSql);
-        stmt.setString(1, "v1");
-
-        // verify that the correct results are returned
-        rs = stmt.executeQuery();
-        assertTrue(rs.next());
-        assertEquals("k1", rs.getString(1));
-        assertEquals("v2", rs.getString(2));
-        assertFalse(rs.next());
-        
-        // verify that that index queue is used only once (for the first upsert)
-        Mockito.verify(spyRpcExecutor).dispatch(Mockito.any(CallRunner.class));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
new file mode 100644
index 0000000..c079a30
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Maps;
+
+public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+    private static final String SCHEMA_NAME = "S";
+    private static final String INDEX_TABLE_NAME = "I";
+    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
+        NUM_SLAVES_BASE = 2;
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet()
+                .iterator()));
+    }
+    
+    @AfterClass
+    public static void doTeardown() throws Exception {
+        TestPhoenixIndexRpcSchedulerFactory.reset();
+    }
+
+    @Test
+    public void testIndexQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME
+                            + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
+
+            // create the index
+            conn.createStatement().execute(
+                    "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
+
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+
+            // run select query that should use the index
+            String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+
+            // verify that the query does a range scan on the index table
+            ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
+
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+
+            // verify that the index queue is not used (since the index writes originate from a client and not a region server)
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(), Mockito.never()).dispatch(Mockito.any(CallRunner.class));
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testMetadataQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table
+            conn.createStatement().execute("CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+            // verify that the metadata queue is used at least once
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor(), Mockito.atLeastOnce()).dispatch(Mockito.any(CallRunner.class));
+        } finally {
+            conn.close();
+        }
+    }
+
+}
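
The doSetup() above is the complete wiring recipe for this feature: the scheduler
factory and the server controller factory go into the region server configuration,
and the client controller factory into the client configuration. A minimal
programmatic sketch (not part of this commit; it only restates the properties the
test sets, using the same constants this patch compiles against):

    // Sketch: server-side configuration for the Phoenix rpc scheduler.
    Configuration serverConf = HBaseConfiguration.create();
    serverConf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
            PhoenixRpcSchedulerFactory.class.getName());
    serverConf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
            ServerRpcControllerFactory.class.getName());

    // Sketch: client-side configuration, so metadata rpcs are tagged with
    // the metadata priority before they reach the server.
    Configuration clientConf = HBaseConfiguration.create();
    clientConf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
            ClientRpcControllerFactory.class.getName());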

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
new file mode 100644
index 0000000..de0ab84
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Maps;
+
+public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+    private static final String SCHEMA_NAME = "S";
+    private static final String INDEX_TABLE_NAME = "I";
+    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+    private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
+    
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, RpcControllerFactory.class.getName());
+        NUM_SLAVES_BASE = 2;
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
+    }
+    
+    @AfterClass
+    public static void doTeardown() throws Exception {
+        TestPhoenixIndexRpcSchedulerFactory.reset();
+    }
+    
+    @Test
+    public void testIndexQos() throws Exception { 
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+    
+            // create the index 
+            conn.createStatement().execute(
+                    "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
+
+            ensureTablesOnDifferentRegionServers(DATA_TABLE_FULL_NAME, INDEX_TABLE_FULL_NAME);
+    
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+    
+            // run select query that should use the index
+            String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+    
+            // verify that the query does a range scan on the index table
+            ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
+    
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+            
+            // drop index table 
+            conn.createStatement().execute(
+                    "DROP INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME );
+            // create a data table with the same name as the index table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + INDEX_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+            
+            // upsert one row to the table (which has the same table name as the previous index table)
+            stmt = conn.prepareStatement("UPSERT INTO " + INDEX_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+            
+            // run select query on the new table
+            selectSql = "SELECT k, v2 from " + INDEX_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+    
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+            
+            // verify that the index queue is used only once (for the first upsert)
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
+        }
+        finally {
+            conn.close();
+        }
+    }
+
+    /**
+     * Verifies that the given tables each have a single region and are on
+     * different region servers. If they are on the same server, moves the
+     * region of tableName2 to the other region server.
+     */
+    private void ensureTablesOnDifferentRegionServers(String tableName1, String tableName2) throws Exception {
+        byte[] table1 = Bytes.toBytes(tableName1);
+        byte[] table2 = Bytes.toBytes(tableName2);
+        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
+        HBaseTestingUtility util = getUtility();
+        MiniHBaseCluster cluster = util.getHBaseCluster();
+        HMaster master = cluster.getMaster();
+        AssignmentManager am = master.getAssignmentManager();
+
+        // verify there is only a single region for the first table
+        List<HRegionInfo> tableRegions = admin.getTableRegions(table1);
+        assertEquals("Expected single region for " + tableName1, 1, tableRegions.size());
+        HRegionInfo hri1 = tableRegions.get(0);
+
+        // verify there is only a single region for the second table
+        tableRegions = admin.getTableRegions(table2);
+        assertEquals("Expected single region for " + tableName2, 1, tableRegions.size());
+        HRegionInfo hri2 = tableRegions.get(0);
+
+        ServerName serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
+        ServerName serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
+
+        // if both tables are on the same region server, move the second table's region to the other region server
+        if (serverName1.equals(serverName2)) {
+            HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
+            HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
+            HRegionServer dstServer = null;
+            HRegionServer srcServer = null;
+            if (server1.getServerName().equals(serverName2)) {
+                dstServer = server2;
+                srcServer = server1;
+            } else {
+                dstServer = server1;
+                srcServer = server2;
+            }
+            byte[] encodedRegionNameInBytes = hri2.getEncodedNameAsBytes();
+            admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
+            while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
+                    || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+                    || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+                    || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
+                // wait for the move to be finished
+                Thread.sleep(1);
+            }
+        }
+
+        hri1 = admin.getTableRegions(table1).get(0);
+        serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
+        hri2 = admin.getTableRegions(table2).get(0);
+        serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
+
+        // verify the tables are now on different region servers
+        assertNotEquals("Tables " + tableName1 + " and " + tableName2 + " should be on different region servers", serverName1, serverName2);
+    }
+    
+    @Test
+    public void testMetadataQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            ensureTablesOnDifferentRegionServers(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
+            // create the table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+            // query the table so that SYSTEM.STATS will be used
+            conn.createStatement().execute("SELECT * FROM " + DATA_TABLE_FULL_NAME);
+            // verify that the metadata queue is used once
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
+        }
+        finally {
+            conn.close();
+        }
+    }
+
+}
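
A caveat on the wait loop in ensureTablesOnDifferentRegionServers() above: it spins
with no upper bound, so a region move that never completes would hang the build
rather than fail the test. A bounded variant is sketched below (an illustration
only, not part of this patch; the 60 second deadline is an arbitrary choice and
fail() is JUnit's org.junit.Assert.fail):

    // Sketch: same wait conditions as the loop above, but with a deadline.
    long deadline = System.currentTimeMillis() + 60000;
    while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
            || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
            || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
            || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
        if (System.currentTimeMillis() > deadline) {
            fail("Timed out waiting for region " + hri2.getEncodedName() + " to move");
        }
        // poll at a coarser interval than the 1ms used in the patch
        Thread.sleep(100);
    }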

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
new file mode 100644
index 0000000..fb29985
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcExecutor;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.mockito.Mockito;
+
+public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFactory {
+    
+    private static RpcExecutor indexRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-index-queue", 30, 1,
+            300));
+    private static RpcExecutor metadataRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-metadata-queue", 30,
+            1, 300));
+
+    @Override
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
+        PhoenixRpcScheduler phoenixIndexRpcScheduler = (PhoenixRpcScheduler)super.create(conf, priorityFunction, abortable);
+        phoenixIndexRpcScheduler.setIndexExecutorForTesting(indexRpcExecutor);
+        phoenixIndexRpcScheduler.setMetadataExecutorForTesting(metadataRpcExecutor);
+        return phoenixIndexRpcScheduler;
+    }
+    
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+    
+    public static RpcExecutor getIndexRpcExecutor() {
+        return indexRpcExecutor;
+    }
+    
+    public static RpcExecutor getMetadataRpcExecutor() {
+        return metadataRpcExecutor;
+    }
+    
+    public static void reset() {
+        Mockito.reset(metadataRpcExecutor);
+        Mockito.reset(indexRpcExecutor);
+    }
+}
+
+
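
The Mockito spies above are what let the two ITs assert routing without reaching
into scheduler internals: every dispatch() on a spied executor is recorded, so a
test can verify exactly which queue handled a call. Condensed usage, restating
what the ITs in this patch do:

    // Assert the index queue saw exactly one call and the metadata queue none.
    Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor())
            .dispatch(Mockito.any(CallRunner.class));
    Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor(),
            Mockito.never()).dispatch(Mockito.any(CallRunner.class));
    // reset() clears the recorded invocations so counts do not leak between tests.
    TestPhoenixIndexRpcSchedulerFactory.reset();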

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
deleted file mode 100644
index 4709304..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.ipc;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * {@link RpcScheduler} that first checks to see if this is an index update before passing off the
- * call to the delegate {@link RpcScheduler}.
- * <p>
- * We reserve the range (1000, 1050], by default (though it is configurable), for index priority
- * writes. Currently, we don't do any prioritization within that range - all index writes are
- * treated with the same priority and put into the same queue.
- */
-public class PhoenixIndexRpcScheduler extends RpcScheduler {
-
-    // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
-    public static final String CALL_QUEUE_READ_SHARE_CONF_KEY = "ipc.server.callqueue.read.share";
-    public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY =
-            "ipc.server.callqueue.handler.factor";
-    private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
-
-    private RpcScheduler delegate;
-    private int minPriority;
-    private int maxPriority;
-    private RpcExecutor callExecutor;
-    private int port;
-
-    public PhoenixIndexRpcScheduler(int indexHandlerCount, Configuration conf,
-            RpcScheduler delegate, int minPriority, int maxPriority) {
-        int maxQueueLength =
-                conf.getInt("ipc.server.max.callqueue.length", indexHandlerCount
-                        * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
-
-        // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
-        float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
-        int numCallQueues =
-                Math.max(1, Math.round(indexHandlerCount * callQueuesHandlersFactor));
-
-        this.minPriority = minPriority;
-        this.maxPriority = maxPriority;
-        this.delegate = delegate;
-
-        this.callExecutor =
-                new BalancedQueueRpcExecutor("Index", indexHandlerCount, numCallQueues,
-                        maxQueueLength);
-    }
-
-    @Override
-    public void init(Context context) {
-        delegate.init(context);
-        this.port = context.getListenerAddress().getPort();
-    }
-
-    @Override
-    public void start() {
-        delegate.start();
-        callExecutor.start(port);
-    }
-
-    @Override
-    public void stop() {
-        delegate.stop();
-        callExecutor.stop();
-    }
-
-    @Override
-    public void dispatch(CallRunner callTask) throws InterruptedException, IOException {
-        RpcServer.Call call = callTask.getCall();
-        int priority = call.header.getPriority();
-        if (minPriority <= priority && priority < maxPriority) {
-            callExecutor.dispatch(callTask);
-        } else {
-            delegate.dispatch(callTask);
-        }
-    }
-
-    @Override
-    public int getGeneralQueueLength() {
-        // not the best way to calculate, but don't have a better way to hook
-        // into metrics at the moment
-        return this.delegate.getGeneralQueueLength() + this.callExecutor.getQueueLength();
-    }
-
-    @Override
-    public int getPriorityQueueLength() {
-        return this.delegate.getPriorityQueueLength();
-    }
-
-    @Override
-    public int getReplicationQueueLength() {
-        return this.delegate.getReplicationQueueLength();
-    }
-
-    @Override
-    public int getActiveRpcHandlerCount() {
-        return this.delegate.getActiveRpcHandlerCount() + this.callExecutor.getActiveHandlerCount();
-    }
-
-    @VisibleForTesting
-    public void setExecutorForTesting(RpcExecutor executor) {
-        this.callExecutor = executor;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
new file mode 100644
index 0000000..e721271
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * {@link RpcScheduler} that first checks to see if this is an index or metadata update before passing off the
+ * call to the delegate {@link RpcScheduler}.
+ */
+public class PhoenixRpcScheduler extends RpcScheduler {
+
+    // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
+    private static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = "ipc.server.callqueue.handler.factor";
+    private static final String CALLQUEUE_LENGTH_CONF_KEY = "ipc.server.max.callqueue.length";
+    private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
+
+    private RpcScheduler delegate;
+    private int indexPriority;
+    private int metadataPriority;
+    private RpcExecutor indexCallExecutor;
+    private RpcExecutor metadataCallExecutor;
+    private int port;
+
+    public PhoenixRpcScheduler(Configuration conf, RpcScheduler delegate, int indexPriority, int metadataPriority) {
+        // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
+        int maxQueueLength =  conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+        float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
+        int numQueues = Math.max(1, Math.round(callQueuesHandlersFactor));
+
+        this.indexPriority = indexPriority;
+        this.metadataPriority = metadataPriority;
+        this.delegate = delegate;
+        this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", 1, numQueues, maxQueueLength);
+        this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", 1, numQueues, maxQueueLength);
+    }
+
+    @Override
+    public void init(Context context) {
+        delegate.init(context);
+        this.port = context.getListenerAddress().getPort();
+    }
+
+    @Override
+    public void start() {
+        delegate.start();
+        indexCallExecutor.start(port);
+        metadataCallExecutor.start(port);
+    }
+
+    @Override
+    public void stop() {
+        delegate.stop();
+        indexCallExecutor.stop();
+        metadataCallExecutor.stop();
+    }
+
+    @Override
+    public void dispatch(CallRunner callTask) throws InterruptedException, IOException {
+        RpcServer.Call call = callTask.getCall();
+        int priority = call.header.getPriority();
+        if (indexPriority == priority) {
+            indexCallExecutor.dispatch(callTask);
+        } else if (metadataPriority == priority) {
+            metadataCallExecutor.dispatch(callTask);
+        } else {
+            delegate.dispatch(callTask);
+        }
+    }
+
+    @Override
+    public int getGeneralQueueLength() {
+        // not the best way to calculate, but don't have a better way to hook
+        // into metrics at the moment
+        return this.delegate.getGeneralQueueLength() + this.indexCallExecutor.getQueueLength() + this.metadataCallExecutor.getQueueLength();
+    }
+
+    @Override
+    public int getPriorityQueueLength() {
+        return this.delegate.getPriorityQueueLength();
+    }
+
+    @Override
+    public int getReplicationQueueLength() {
+        return this.delegate.getReplicationQueueLength();
+    }
+
+    @Override
+    public int getActiveRpcHandlerCount() {
+        return this.delegate.getActiveRpcHandlerCount() + this.indexCallExecutor.getActiveHandlerCount() + this.metadataCallExecutor.getActiveHandlerCount();
+    }
+
+    @VisibleForTesting
+    public void setIndexExecutorForTesting(RpcExecutor executor) {
+        this.indexCallExecutor = executor;
+    }
+    
+    @VisibleForTesting
+    public void setMetadataExecutorForTesting(RpcExecutor executor) {
+        this.metadataCallExecutor = executor;
+    }
+    
+    
+}
\ No newline at end of file
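
The two priorities the dispatch() above matches on come from the QueryServices
attributes read by PhoenixRpcSchedulerFactory (next file). Overriding them is two
configuration entries; a sketch, assuming defaults of 1000 (index) and 2000
(metadata) and remembering that the values must stay outside HBase's own QOS range:

    // Sketch: custom, distinct priorities for the index and metadata queues.
    conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, 1100);
    conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, 2100);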

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
new file mode 100644
index 0000000..a697382
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
+import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Factory to create a {@link PhoenixRpcScheduler}. In this package so we can access the
+ * {@link SimpleRpcSchedulerFactory}.
+ */
+public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixRpcSchedulerFactory.class);
+
+    private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
+            "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
+
+    @Override
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
+        // create the delegate scheduler
+        RpcScheduler delegate;
+        try {
+            // happens in <=0.98.4 where the scheduler factory is not visible
+            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
+        } catch (IllegalAccessError e) {
+            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
+            throw e;
+        }
+
+        // get the index priority configs
+        int indexPriority = getIndexPriority(conf);
+        validatePriority(indexPriority);
+        // get the metadata priority configs
+        int metadataPriority = getMetadataPriority(conf);
+        validatePriority(metadataPriority);
+
+        // validate index and metadata priorities are not the same
+        Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be the same: " + indexPriority);
+        LOG.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+
+        PhoenixRpcScheduler scheduler =
+                new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority);
+        return scheduler;
+    }
+
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+
+    /**
+     * Validates that the given priority does not overlap with the HBase priority range
+     */
+    private void validatePriority(int priority) {
+        Preconditions.checkArgument(priority < HConstants.NORMAL_QOS || priority > HConstants.HIGH_QOS,
+                "priority cannot be within the HBase priority range " + HConstants.NORMAL_QOS + " to " + HConstants.HIGH_QOS);
+    }
+
+    public static int getIndexPriority(Configuration conf) {
+        return conf.getInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_PRIORITY);
+    }
+    
+    public static int getMetadataPriority(Configuration conf) {
+        return conf.getInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_METADATA_PRIORITY);
+    }
+    
+}
\ No newline at end of file
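
To make validatePriority() concrete, assuming HBase's constants are NORMAL_QOS = 0
and HIGH_QOS = 200 (the values in the HBase versions this patch targets, though
worth double-checking per release):

    validatePriority(2000);  // accepted: 2000 > HIGH_QOS
    validatePriority(1000);  // accepted: 1000 > HIGH_QOS
    validatePriority(100);   // rejected: falls inside [NORMAL_QOS, HIGH_QOS]

Together with the indexPriority != metadataPriority precondition, this guarantees
the two Phoenix queues never collide with each other or with HBase's built-in
priority handling.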

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
new file mode 100644
index 0000000..5a7dcc2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed
+ * in its own queue.
+ */
+public class ClientRpcControllerFactory extends RpcControllerFactory {
+
+    public ClientRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+    
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate) {
+        return new MetadataRpcController(delegate, conf);
+    }
+    
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
new file mode 100644
index 0000000..fdb1d33
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+class IndexRpcController extends DelegatingPayloadCarryingRpcController {
+
+    private final int priority;
+    private final String tracingTableName;
+    
+    public IndexRpcController(PayloadCarryingRpcController delegate, Configuration conf) {
+        super(delegate);
+        this.priority = PhoenixRpcSchedulerFactory.getIndexPriority(conf);
+        this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
+                QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+    }
+    
+    @Override
+    public void setPriority(final TableName tn) {
+        if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) {
+            setPriority(this.priority);
+        }
+        else {
+            super.setPriority(tn);
+        }
+    }
+    
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
new file mode 100644
index 0000000..23b9f03
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+
+import com.google.common.collect.ImmutableList;
+
+class MetadataRpcController extends DelegatingPayloadCarryingRpcController {
+
+    private int priority;
+    // list of system tables
+    private static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>()
+            .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
+            .add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
+            .add(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME).build();
+
+    public MetadataRpcController(PayloadCarryingRpcController delegate,
+            Configuration conf) {
+        super(delegate);
+        this.priority = PhoenixRpcSchedulerFactory.getMetadataPriority(conf);
+    }
+
+    @Override
+    public void setPriority(final TableName tn) {
+        if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+            setPriority(this.priority);
+        } else {
+            super.setPriority(tn);
+        }
+    }
+
+}
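
The controller above decides purely on the table name: the three Phoenix system
tables get the metadata priority, everything else falls through to the delegate.
A sketch of the observable behavior (MetadataRpcController is package-private, so
this is illustrative rather than client code):

    // A SYSTEM.CATALOG rpc is tagged with the configured metadata priority...
    controller.setPriority(TableName.valueOf("SYSTEM.CATALOG"));
    // ...while an rpc against a user table keeps the delegate's priority.
    controller.setPriority(TableName.valueOf("S.T"));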

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
new file mode 100644
index 0000000..8c17eda
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that sets the priority of index and metadata rpc calls
+ * so that they are each processed in their own queues
+ */
+public class ServerRpcControllerFactory extends RpcControllerFactory {
+
+    public ServerRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+    
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate) {
+        // construct a chain of controllers: metadata, then index, then the standard controller
+        IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
+        return new MetadataRpcController(indexRpcController, conf);
+    }
+    
+}
\ No newline at end of file
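
Putting the chain together, the effective server-side routing is (a summary of
the two controllers above, not code from this patch):

    // SYSTEM.CATALOG, SYSTEM.STATS, SYSTEM."SEQUENCE"  -> metadata priority
    // HBase system tables and the tracing stats table  -> delegate's priority
    // every other table (i.e. index writes from a RS)  -> index priority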

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
deleted file mode 100644
index a192feb..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index;
-
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.util.SchemaUtil;
-
-/**
- * {@link RpcControllerFactory} that overrides the standard {@link PayloadCarryingRpcController} to
- * allow the configured index tables (via {@link #INDEX_TABLE_NAMES_KEY}) to use the Index priority.
- */
-public class IndexQosRpcControllerFactory extends RpcControllerFactory {
-
-    public static final String INDEX_TABLE_NAMES_KEY = "phoenix.index.rpc.controller.index-tables";
-
-    public IndexQosRpcControllerFactory(Configuration conf) {
-        super(conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController() {
-        PayloadCarryingRpcController delegate = super.newController();
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
-        PayloadCarryingRpcController delegate = super.newController(cellScanner);
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
-        PayloadCarryingRpcController delegate = super.newController(cellIterables);
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    private class IndexQosRpcController extends DelegatingPayloadCarryingRpcController {
-
-        private int priority;
-
-        public IndexQosRpcController(PayloadCarryingRpcController delegate, Configuration conf) {
-            super(delegate);
-            this.priority = PhoenixIndexRpcSchedulerFactory.getMinPriority(conf);
-        }
-        @Override
-        public void setPriority(final TableName tn) {
-            // if its an index table, then we override to the index priority
-            if (!tn.isSystemTable() &&  !SchemaUtil.isSystemDataTable(tn.getNameAsString())) {
-                setPriority(this.priority);
-            } 
-            else {
-                super.setPriority(tn);
-            }
-        }
-
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
deleted file mode 100644
index 1789b0e..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.ipc;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Factory to create a {@link PhoenixIndexRpcScheduler}. In this package so we can access the
- * {@link SimpleRpcSchedulerFactory}.
- */
-public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixIndexRpcSchedulerFactory.class);
-
-    private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
-            "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
-
-    @Override
-    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
-        // create the delegate scheduler
-        RpcScheduler delegate;
-        try {
-            // happens in <=0.98.4 where the scheduler factory is not visible
-            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
-        } catch (IllegalAccessError e) {
-            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
-            throw e;
-        }
-
-        int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT);
-        int minPriority = getMinPriority(conf);
-        int maxPriority = conf.getInt(QueryServices.MAX_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MAX_PRIORITY);
-        // make sure the ranges are outside the warning ranges
-        Preconditions.checkArgument(maxPriority > minPriority, "Max index priority (" + maxPriority
-                + ") must be larger than min priority (" + minPriority + ")");
-        boolean allSmaller =
-                minPriority < HConstants.REPLICATION_QOS
-                        && maxPriority < HConstants.REPLICATION_QOS;
-        boolean allLarger = minPriority > HConstants.HIGH_QOS;
-        Preconditions.checkArgument(allSmaller || allLarger, "Index priority range (" + minPriority
-                + ",  " + maxPriority + ") must be outside HBase priority range ("
-                + HConstants.REPLICATION_QOS + ", " + HConstants.HIGH_QOS + ")");
-
-        LOG.info("Using custom Phoenix Index RPC Handling with " + indexHandlerCount
-                + " handlers and priority range [" + minPriority + ", " + maxPriority + ")");
-
-        PhoenixIndexRpcScheduler scheduler =
-                new PhoenixIndexRpcScheduler(indexHandlerCount, conf, delegate, minPriority,
-                        maxPriority);
-        return scheduler;
-    }
-
-    @Override
-    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
-        return create(configuration, priorityFunction, null);
-    }
-
-    public static int getMinPriority(Configuration conf) {
-        return conf.getInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 15bcfd0..1b8b57d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -279,10 +279,6 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     /** Version below which we fall back on the generic KeyValueBuilder */
     public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
     
-    // list of system tables
-    public static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>().add(SYSTEM_CATALOG_NAME)
-            .add(SYSTEM_STATS_NAME).add(SEQUENCE_FULLNAME).build();
-    
     PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException {
         this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new PhoenixStatement(connection));
         this.connection = connection;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 2eab5dd..65f6acf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -123,9 +123,8 @@ public interface QueryServices extends SQLCloseable {
     // Index will be partially re-built from index disable time stamp - following overlap time
     public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB =
         "phoenix.index.failure.handling.rebuild.overlap.time";
-    public static final String MIN_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.min";
-    public static final String MAX_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.max";
-    public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.regionserver.index.handler.count";
+    public static final String INDEX_PRIOIRTY_ATTRIB = "phoenix.index.rpc.priority";
+    public static final String METADATA_PRIOIRTY_ATTRIB = "phoenix.metadata.rpc.priority";
     public static final String ALLOW_LOCAL_INDEX_ATTRIB = "phoenix.index.allowLocalIndex";
 
     // Config parameters for for configuring tracing

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 8cd740a..97040d2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -41,6 +41,7 @@ import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LI
 import static org.apache.phoenix.query.QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.METRICS_ENABLED;
 import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
@@ -61,12 +62,13 @@ import static org.apache.phoenix.query.QueryServices.STATS_USE_CURRENT_TIME_ATTR
 import static org.apache.phoenix.query.QueryServices.THREAD_POOL_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_TIMEOUT_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
-import static org.apache.phoenix.query.QueryServices.METRICS_ENABLED;
 
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.trace.util.Tracing;
@@ -138,13 +140,12 @@ public class QueryServicesOptions {
     public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 10000; // 10 secs
     public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 300000; // 5 mins
 
-    public static final int DEFAULT_INDEX_MAX_PRIORITY = 1050;
     /**
      * HConstants#HIGH_QOS is the max we will see to a standard table. We go higher to differentiate
      * and give some room for things in the middle
      */
-    public static final int DEFAULT_INDEX_MIN_PRIORITY = 1000;
-    public static final int DEFAULT_INDEX_HANDLER_COUNT = 30;
+    public static final int DEFAULT_INDEX_PRIORITY = 1000;
+    public static final int DEFAULT_METADATA_PRIORITY = 2000;
     public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true;
 
     public static final int DEFAULT_TRACING_PAGE_SIZE = 100;
@@ -235,7 +236,8 @@ public class QueryServicesOptions {
             .setIfUnset(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE)
             .setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK)
             .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK)
-            .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED);
+            .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED)
+            .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
             ;
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
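
The setIfUnset above wires ClientRpcControllerFactory in as the default, so client
operations carry the intended RPC priority without extra configuration. A hedged
sketch of setting it explicitly from a client instead (the JDBC URL and class name
ControllerFactoryDemo are illustrative; the key and factory class are the ones
referenced in the change above):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;

    public class ControllerFactoryDemo {
        public static void main(String[] args) throws Exception {
            // Same key/class pair that QueryServicesOptions now sets if unset.
            Properties props = new Properties();
            props.setProperty(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    ClientRpcControllerFactory.class.getName());
            // Illustrative connection string; any Phoenix URL works the same way.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // RPCs issued through this connection go via the custom controller factory.
            }
        }
    }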

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 4a8341d..46da726 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -404,13 +404,6 @@ public class SchemaUtil {
         return false;
     }
     
-    /**
-     * Returns true if the given table is a system table (does not include future system indexes)
-     */
-    public static boolean isSystemDataTable(String fullTableName) {
-    	return PhoenixDatabaseMetaData.SYSTEM_TABLE_NAMES.contains(fullTableName);
-    }
-
     // Given the splits and the rowKeySchema, find out the keys that 
     public static byte[][] processSplits(byte[][] splits, LinkedHashSet<PColumn> pkColumns, Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException {
         // FIXME: shouldn't this return if splits.length == 0?

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index 8bd8c11..12f1863 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -44,9 +44,9 @@ public class PhoenixIndexRpcSchedulerTest {
     public void testIndexPriorityWritesToIndexHandler() throws Exception {
         RpcScheduler mock = Mockito.mock(RpcScheduler.class);
 
-        PhoenixIndexRpcScheduler scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 200, 250);
+        PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250);
         BalancedQueueRpcExecutor executor = new BalancedQueueRpcExecutor("test-queue", 1, 1, 1);
-        scheduler.setExecutorForTesting(executor);
+        scheduler.setIndexExecutorForTesting(executor);
         dispatchCallWithPriority(scheduler, 200);
         List<BlockingQueue<CallRunner>> queues = executor.getQueues();
         assertEquals(1, queues.size());
@@ -54,8 +54,8 @@ public class PhoenixIndexRpcSchedulerTest {
         queue.poll(20, TimeUnit.SECONDS);
 
         // try again, this time we tweak the ranges we support
-        scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 101, 110);
-        scheduler.setExecutorForTesting(executor);
+        scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110);
+        scheduler.setIndexExecutorForTesting(executor);
         dispatchCallWithPriority(scheduler, 101);
         queue.poll(20, TimeUnit.SECONDS);
 
@@ -71,14 +71,14 @@ public class PhoenixIndexRpcSchedulerTest {
     @Test
     public void testDelegateWhenOutsideRange() throws Exception {
         RpcScheduler mock = Mockito.mock(RpcScheduler.class);
-        PhoenixIndexRpcScheduler scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 200, 250);
+        PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250);
         dispatchCallWithPriority(scheduler, 100);
-        dispatchCallWithPriority(scheduler, 250);
+        dispatchCallWithPriority(scheduler, 251);
 
         // try again, this time we tweak the ranges we support
-        scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 101, 110);
+        scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110);
         dispatchCallWithPriority(scheduler, 200);
-        dispatchCallWithPriority(scheduler, 110);
+        dispatchCallWithPriority(scheduler, 111);
 
         Mockito.verify(mock, Mockito.times(4)).init(Mockito.any(Context.class));
         Mockito.verify(mock, Mockito.times(4)).dispatch(Mockito.any(CallRunner.class));


[13/31] phoenix git commit: PHOENIX-1770 Correct exit code from bin scripts

Posted by ap...@apache.org.
PHOENIX-1770 Correct exit code from bin scripts

Make the Python scripts under bin/ exit with the exit code
returned by the underlying Java command.

Contributed by Mark Tse.
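
Each script follows the same pattern: capture the status returned by subprocess.call
and pass it to sys.exit instead of discarding it. A minimal Java sketch of the same
idea (the class name and command line are illustrative, not part of this commit,
which changes only the Python scripts):

    import java.io.IOException;

    // Launch a child process and surface its exit status to our own caller,
    // mirroring the subprocess.call/sys.exit pattern in the diffs below.
    public class ExitCodeLauncher {
        public static void main(String[] args) throws IOException, InterruptedException {
            Process child = new ProcessBuilder("java", "-jar", "tool.jar")
                    .inheritIO()                 // forward stdin/stdout/stderr to the child
                    .start();
            int exitcode = child.waitFor();      // block until the child finishes
            System.exit(exitcode);               // propagate rather than swallow the status
        }
    }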


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4d716100
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4d716100
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4d716100

Branch: refs/heads/4.x-HBase-1.x
Commit: 4d71610081903a535767f1a462ba47e1ffec5191
Parents: 6cb6a37
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Thu Mar 26 08:43:48 2015 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Mar 26 08:47:15 2015 +0100

----------------------------------------------------------------------
 bin/end2endTest.py |  3 ++-
 bin/performance.py | 13 ++++++++++---
 bin/psql.py        |  3 ++-
 3 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4d716100/bin/end2endTest.py
----------------------------------------------------------------------
diff --git a/bin/end2endTest.py b/bin/end2endTest.py
index 96886c7..a5993dc 100755
--- a/bin/end2endTest.py
+++ b/bin/end2endTest.py
@@ -44,4 +44,5 @@ java_cmd = "java -cp " + hbase_config_path + os.pathsep + phoenix_jar_path + os.
     hbase_library_path + " org.apache.phoenix.end2end.End2EndTestDriver " + \
     ' '.join(sys.argv[1:])
 
-subprocess.call(java_cmd, shell=True)
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4d716100/bin/performance.py
----------------------------------------------------------------------
diff --git a/bin/performance.py b/bin/performance.py
index c69edfd..b9df433 100755
--- a/bin/performance.py
+++ b/bin/performance.py
@@ -85,7 +85,9 @@ print "-----------------------------------------"
 print "\nCreating performance table..."
 createFileWithContent(ddl, createtable)
 
-subprocess.call(execute + ddl, shell=True)
+exitcode = subprocess.call(execute + ddl, shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
 
 # Write real,user,sys time on console for the following queries
 queryex("1 - Count", "SELECT COUNT(1) FROM %s;" % (table))
@@ -95,11 +97,16 @@ queryex("4 - Truncate + Group By", "SELECT TRUNC(DATE,'DAY') DAY FROM %s GROUP B
 queryex("5 - Filter + Count", "SELECT COUNT(1) FROM %s WHERE CORE<10;" % (table))
 
 print "\nGenerating and upserting data..."
-subprocess.call('java -jar %s %s' % (phoenix_utils.testjar, rowcount), shell=True)
+exitcode = subprocess.call('java -jar %s %s' % (phoenix_utils.testjar, rowcount), shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
+
 print "\n"
 createFileWithContent(qry, statements)
 
-subprocess.call(execute + data + ' ' + qry, shell=True)
+exitcode = subprocess.call(execute + data + ' ' + qry, shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
 
 # clear temporary files
 delfile(ddl)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4d716100/bin/psql.py
----------------------------------------------------------------------
diff --git a/bin/psql.py b/bin/psql.py
index 34a95df..247001a 100755
--- a/bin/psql.py
+++ b/bin/psql.py
@@ -39,4 +39,5 @@ java_cmd = 'java -cp "' + phoenix_utils.hbase_conf_path + os.pathsep + phoenix_u
     os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
     " org.apache.phoenix.util.PhoenixRuntime " + args 
 
-subprocess.call(java_cmd, shell=True)
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)


[11/31] phoenix git commit: PHOENIX-1684 Functional Index using REGEXP_SUBSTR doesn't work correctly

Posted by ap...@apache.org.
PHOENIX-1684 Functional Index using REGEXP_SUBSTR doesn't work correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a8b27e3f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a8b27e3f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a8b27e3f

Branch: refs/heads/4.x-HBase-1.x
Commit: a8b27e3f010d15d3f3b519c38fbb052ebb4a6cdb
Parents: 250474d
Author: Thomas <td...@salesforce.com>
Authored: Thu Mar 19 13:57:27 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Tue Mar 24 15:56:25 2015 -0700

----------------------------------------------------------------------
 .../end2end/index/IndexExpressionIT.java        | 161 ++++++++++++++-----
 .../phoenix/compile/PostIndexDDLCompiler.java   |   4 +-
 .../parse/IndexExpressionParseNodeRewriter.java |  30 +---
 .../apache/phoenix/schema/MetaDataClient.java   |   4 +-
 .../org/apache/phoenix/util/StringUtil.java     |   5 +
 .../phoenix/compile/QueryCompilerTest.java      |  22 ++-
 6 files changed, 153 insertions(+), 73 deletions(-)
----------------------------------------------------------------------
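
The core scenario this fixes, sketched as client code (the URL and names are
illustrative; the statements mirror helpTestCaseSensitiveFunctionIndex in the diff
below, and the quadrupled backslashes in the Java literals become a single regex
backslash after the Java and SQL string levels are each unescaped):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class FunctionalIndexDemo {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                        "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
                // \\\\ in the Java literal is \\ in SQL, i.e. one backslash in the regex.
                conn.createStatement().execute(
                        "CREATE INDEX idx ON t (REGEXP_SUBSTR(v,'id:\\\\w+'))");
                conn.createStatement().execute("UPSERT INTO t VALUES('k1','{id:id1}')");
                conn.commit();
                ResultSet rs = conn.createStatement().executeQuery(
                        "SELECT k FROM t WHERE REGEXP_SUBSTR(v,'id:\\\\w+') = 'id:id1'");
                while (rs.next()) {
                    System.out.println(rs.getString(1));  // k1, served from the index
                }
            }
        }
    }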


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 1e3733b..0203e35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -1202,54 +1202,60 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
     
     @Test
     public void testViewUsesTableIndex() throws Exception {
-        ResultSet rs;
         Connection conn = DriverManager.getConnection(getUrl());
-        String ddl = "CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2))";
-        conn.createStatement().execute(ddl);
-        conn.createStatement().execute("CREATE INDEX i1 ON t(k2, s2, s3, s1)");
-        conn.createStatement().execute("CREATE INDEX i2 ON t(k2, s2||'_'||s3, s1, s4)");
-        
-        ddl = "CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo'";
-        conn.createStatement().execute(ddl);
-        conn.createStatement().execute("UPSERT INTO t VALUES(1,1,'foo','abc','cab')");
-        conn.createStatement().execute("UPSERT INTO t VALUES(2,2,'bar','xyz','zyx')");
-        conn.commit();
-        
-        rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getLong(1));
-        assertFalse(rs.next());
-        
-        //i2 should be used since it contains s3||'_'||s4 i
-        String query = "SELECT s2||'_'||s3 FROM v WHERE k2=1 AND (s2||'_'||s3)='abc_cab'";
-        rs = conn.createStatement(  ).executeQuery("EXPLAIN " + query);
-        String queryPlan = QueryUtil.getExplainPlan(rs);
-        assertEquals(
-                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I2 [1,'abc_cab','foo']\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY", queryPlan);
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("abc_cab", rs.getString(1));
-        assertFalse(rs.next());
-        
-        conn.createStatement().execute("ALTER VIEW v DROP COLUMN s4");
-        //i2 cannot be used since s4 has been dropped from the view, so i1 will be used 
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        queryPlan = QueryUtil.getExplainPlan(rs);
-        assertEquals(
-                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I1 [1]\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY AND ((\"S2\" || '_' || \"S3\") = 'abc_cab' AND \"S1\" = 'foo')", queryPlan);
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("abc_cab", rs.getString(1));
-        assertFalse(rs.next());    
+        try 
+        {
+        	ResultSet rs;
+	        String ddl = "CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2))";
+	        conn.createStatement().execute(ddl);
+	        conn.createStatement().execute("CREATE INDEX i1 ON t(k2, s2, s3, s1)");
+	        conn.createStatement().execute("CREATE INDEX i2 ON t(k2, s2||'_'||s3, s1, s4)");
+	        
+	        ddl = "CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo'";
+	        conn.createStatement().execute(ddl);
+	        conn.createStatement().execute("UPSERT INTO t VALUES(1,1,'foo','abc','cab')");
+	        conn.createStatement().execute("UPSERT INTO t VALUES(2,2,'bar','xyz','zyx')");
+	        conn.commit();
+	        
+	        rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
+	        assertTrue(rs.next());
+	        assertEquals(1, rs.getLong(1));
+	        assertFalse(rs.next());
+	        
+	        //i2 should be used since it contains s3||'_'||s4 i
+	        String query = "SELECT s2||'_'||s3 FROM v WHERE k2=1 AND (s2||'_'||s3)='abc_cab'";
+	        rs = conn.createStatement(  ).executeQuery("EXPLAIN " + query);
+	        String queryPlan = QueryUtil.getExplainPlan(rs);
+	        assertEquals(
+	                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I2 [1,'abc_cab','foo']\n" + 
+	                "    SERVER FILTER BY FIRST KEY ONLY", queryPlan);
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals("abc_cab", rs.getString(1));
+	        assertFalse(rs.next());
+	        
+	        conn.createStatement().execute("ALTER VIEW v DROP COLUMN s4");
+	        //i2 cannot be used since s4 has been dropped from the view, so i1 will be used 
+	        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+	        queryPlan = QueryUtil.getExplainPlan(rs);
+	        assertEquals(
+	                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I1 [1]\n" + 
+	                "    SERVER FILTER BY FIRST KEY ONLY AND ((\"S2\" || '_' || \"S3\") = 'abc_cab' AND \"S1\" = 'foo')", queryPlan);
+	        rs = conn.createStatement().executeQuery(query);
+	        assertTrue(rs.next());
+	        assertEquals("abc_cab", rs.getString(1));
+	        assertFalse(rs.next());    
+        }
+        finally {
+        	conn.close();
+        }
     }
     
 	@Test
 	public void testExpressionThrowsException() throws Exception {
 		Connection conn = DriverManager.getConnection(getUrl());
-		String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY, k2 INTEGER)";
 		try {
+			String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY, k2 INTEGER)";
 			conn.createStatement().execute(ddl);
 			ddl = "CREATE INDEX i on t(k1/k2)";
 			conn.createStatement().execute(ddl);
@@ -1261,6 +1267,79 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
 			conn.commit();
 			fail();
 		} catch (CommitException e) {
+		} finally {
+			conn.close();
+		}
+	}
+	
+	@Test
+	public void testImmutableCaseSensitiveFunctionIndex() throws Exception {
+		helpTestCaseSensitiveFunctionIndex(false, false);
+	}
+
+	@Test
+	public void testImmutableLocalCaseSensitiveFunctionIndex() throws Exception {
+		helpTestCaseSensitiveFunctionIndex(false, true);
+	}
+
+	@Test
+	public void testMutableCaseSensitiveFunctionIndex() throws Exception {
+		helpTestCaseSensitiveFunctionIndex(true, false);
+	}
+
+	@Test
+	public void testMutableLocalCaseSensitiveFunctionIndex() throws Exception {
+		helpTestCaseSensitiveFunctionIndex(true, true);
+	}
+
+	protected void helpTestCaseSensitiveFunctionIndex(boolean mutable,
+			boolean localIndex) throws Exception {
+		Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+		Connection conn = DriverManager.getConnection(getUrl(), props);
+		try {
+			conn.createStatement().execute(
+					"CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) "
+							+ (mutable ? "IMMUTABLE_ROWS=true" : ""));
+			String query = "SELECT * FROM t";
+			ResultSet rs = conn.createStatement().executeQuery(query);
+			assertFalse(rs.next());
+			String ddl = "CREATE " + (localIndex ? "LOCAL" : "")
+					+ " INDEX idx ON t (REGEXP_SUBSTR(v,'id:\\\\w+'))";
+			PreparedStatement stmt = conn.prepareStatement(ddl);
+			stmt.execute();
+			query = "SELECT * FROM idx";
+			rs = conn.createStatement().executeQuery(query);
+			assertFalse(rs.next());
+
+			stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?)");
+			stmt.setString(1, "k1");
+			stmt.setString(2, "{id:id1}");
+			stmt.execute();
+			stmt.setString(1, "k2");
+			stmt.setString(2, "{id:id2}");
+			stmt.execute();
+			conn.commit();
+			
+			query = "SELECT k FROM t WHERE REGEXP_SUBSTR(v,'id:\\\\w+') = 'id:id1'";
+			rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+			if (localIndex) {
+				assertEquals(
+						"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_T [-32768,'id:id1']\n"
+								+ "    SERVER FILTER BY FIRST KEY ONLY\nCLIENT MERGE SORT",
+						QueryUtil.getExplainPlan(rs));
+			} else {
+				assertEquals(
+						"CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['id:id1']\n"
+								+ "    SERVER FILTER BY FIRST KEY ONLY",
+						QueryUtil.getExplainPlan(rs));
+			}
+
+			rs = conn.createStatement().executeQuery(query);
+			assertTrue(rs.next());
+			assertEquals("k1", rs.getString(1));
+			assertFalse(rs.next());
+		} finally {
+			conn.close();
 		}
 	}
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
index 5836b99..9f99f1c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
@@ -27,6 +27,7 @@ import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.collect.Lists;
 
@@ -73,7 +74,8 @@ public class PostIndexDDLCompiler {
         for (int i = posOffset; i < nIndexPKColumns; i++) {
             PColumn col = indexPKColumns.get(i);
             String indexColName = col.getName().getString();
-            String dataColName = col.getExpressionStr();
+            // need to escape backslash as this used in the SELECT statement
+            String dataColName = StringUtil.escapeBackslash(col.getExpressionStr());
             dataColumns.append(dataColName).append(",");
             indexColumns.append('"').append(indexColName).append("\",");
             indexColumnNames.add(indexColName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
index 43cb9f3..0273041 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java
@@ -37,26 +37,6 @@ public class IndexExpressionParseNodeRewriter extends ParseNodeRewriter {
 
     private final Map<ParseNode, ParseNode> indexedParseNodeToColumnParseNodeMap;
     
-    private static class ColumnParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor {
-        
-        private boolean isParseNodeCaseSensitive;
-        
-        public void reset() {
-            this.isParseNodeCaseSensitive = false;
-        }
-        
-        @Override
-        public Void visit(ColumnParseNode node) throws SQLException {
-            isParseNodeCaseSensitive = isParseNodeCaseSensitive  || node.isCaseSensitive() || node.isTableNameCaseSensitive();
-            return null;
-        }
-        
-        public boolean isParseNodeCaseSensitive() {
-            return isParseNodeCaseSensitive;
-        }
-        
-    }
-
     public IndexExpressionParseNodeRewriter(PTable index, PhoenixConnection connection) throws SQLException {
         indexedParseNodeToColumnParseNodeMap = Maps.newHashMapWithExpectedSize(index.getColumns().size());
         NamedTableNode tableNode = NamedTableNode.create(null,
@@ -66,21 +46,13 @@ public class IndexExpressionParseNodeRewriter extends ParseNodeRewriter {
         StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver);
         IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null);
         ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
-        ColumnParseNodeVisitor columnParseNodeVisitor = new ColumnParseNodeVisitor();
         int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + (index.getViewIndexId() == null ? 0 : 1);
         List<PColumn> pkColumns = index.getPKColumns();
 		for (int i=indexPosOffset; i<pkColumns.size(); ++i) {
         	PColumn column = pkColumns.get(i);
         	String expressionStr = IndexUtil.getIndexColumnExpressionStr(column);
             ParseNode expressionParseNode  = SQLParser.parseCondition(expressionStr);
-            columnParseNodeVisitor.reset();
-            expressionParseNode.accept(columnParseNodeVisitor);
-            String colName = column.getName().getString();
-            if (columnParseNodeVisitor.isParseNodeCaseSensitive()) {
-                // force column name to be case sensitive name by surround with double quotes
-                colName = "\"" + colName + "\"";
-            }
-            
+            String colName = "\"" + column.getName().getString() + "\"";
             Expression dataExpression = expressionParseNode.accept(expressionCompiler);
             PDataType expressionDataType = dataExpression.getDataType();
             ParseNode indexedParseNode = expressionParseNode.accept(rewriter);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 2ba0cde..e414039 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -164,6 +164,7 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1036,7 +1037,8 @@ public class MetaDataClient {
                     // can lose information during compilation
                     StringBuilder buf = new StringBuilder();
                     parseNode.toSQL(resolver, buf);
-                    String expressionStr = buf.toString();
+                    // need to escape backslash as this expression will be re-parsed later
+                    String expressionStr = StringUtil.escapeBackslash(buf.toString());
                     
                     ColumnName colName = null;
                     ColumnRef colRef = expressionIndexCompiler.getColumnRef();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
index a83098a..4a7ae38 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
@@ -331,4 +331,9 @@ public class StringUtil {
     public static String escapeStringConstant(String pattern) {
         return StringEscapeUtils.escapeSql(pattern); // Need to escape double quotes
     }   
+    
+    public static String escapeBackslash(String input) {
+    	// see http://stackoverflow.com/questions/4653831/regex-how-to-escape-backslashes-and-special-characters
+    	return input.replaceAll("\\\\","\\\\\\\\");
+    }
 }
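
The doubling matters because the expression string is stored with the index
metadata and re-parsed later, and each parse consumes one level of escaping. A
small round-trip sketch (the literal is what parseNode.toSQL() might regenerate
for an index on REGEXP_SUBSTR(v,'id:\w+'); the demo class is illustrative):

    public class EscapeBackslashDemo {
        public static void main(String[] args) {
            // One real backslash at runtime, as regenerated by parseNode.toSQL().
            String regenerated = "REGEXP_SUBSTR(V, 'id:\\w+')";
            // Same transform as StringUtil.escapeBackslash: every \ becomes \\.
            String stored = regenerated.replaceAll("\\\\", "\\\\\\\\");
            // Re-parsing 'stored' as SQL collapses each pair back to a single
            // backslash, so the index expression matches the original regex.
            System.out.println(stored);  // REGEXP_SUBSTR(V, 'id:\\w+')
        }
    }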

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index e17c528..83c984b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1564,5 +1564,25 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
             stmt.close();
         }
     }
-
+    
+    @Test
+    public void testRegex() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = conn.createStatement();
+        stmt.execute("CREATE TABLE t (k1 INTEGER PRIMARY KEY, v VARCHAR)");
+        
+        //character classes
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[abc]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[^abc]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-zA-Z]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-d[m-p]]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[def]]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^bc]]') = 'val'");
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^m-p]]') = 'val'");
+        
+        // predefined character classes
+        stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '.\\\\d\\\\D\\\\s\\\\S\\\\w\\\\W') = 'val'");
+    }
+    
+   
 }


[20/31] phoenix git commit: PHOENIX-1722 Speedup CONVERT_TZ function: add JodaTimezoneCache (Vaclav Loffelmann)

Posted by ap...@apache.org.
PHOENIX-1722 Speedup CONVERT_TZ function: add JodaTimezoneCache (Vaclav Loffelmann)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fcedbe6a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fcedbe6a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fcedbe6a

Branch: refs/heads/4.x-HBase-1.x
Commit: fcedbe6a492faa7cc39fe0ed7c4c24c7d41db1a5
Parents: 4248be3
Author: Thomas <td...@salesforce.com>
Authored: Fri Mar 27 15:32:38 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Fri Mar 27 15:32:38 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/cache/JodaTimezoneCache.java | 84 ++++++++++++++++++++
 .../phoenix/cache/JodaTimezoneCacheTest.java    | 51 ++++++++++++
 2 files changed, 135 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fcedbe6a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
new file mode 100644
index 0000000..54904d7
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.joda.time.DateTimeZone;
+
+public class JodaTimezoneCache {
+
+    public static final int CACHE_EXPRIRE_TIME_MINUTES = 10;
+    private static final LoadingCache<ByteBuffer, DateTimeZone> cachedJodaTimeZones = createTimezoneCache();
+
+    /**
+     * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+     * @return joda's DateTimeZone instance
+     * @throws IllegalDataException if unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(ByteBuffer timezoneId) {
+        try {
+            return cachedJodaTimeZones.get(timezoneId);
+        } catch (ExecutionException ex) {
+            throw new IllegalDataException(ex);
+        } catch (UncheckedExecutionException e) {
+            throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array()));
+        }
+    }
+
+    /**
+     * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+     * @return joda's DateTimeZone instance
+     * @throws IllegalDataException if unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) {
+        return getInstance(ByteBuffer.wrap(timezoneId.copyBytes()));
+    }
+
+    /**
+     * Returns joda's DateTimeZone instance from cache or create new instance and cache it.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+     * @return joda's DateTimeZone instance
+     * @throws IllegalDataException if unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(String timezoneId) {
+        return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId)));
+    }
+
+    private static LoadingCache<ByteBuffer, DateTimeZone> createTimezoneCache() {
+        return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, TimeUnit.MINUTES).build(new CacheLoader<ByteBuffer, DateTimeZone>() {
+
+            @Override
+            public DateTimeZone load(ByteBuffer timezone) throws Exception {
+                return DateTimeZone.forID(Bytes.toString(timezone.array()));
+            }
+        });
+    }
+
+}
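
Callers resolve zones through the cache instead of constructing them per row. A
short usage sketch (assuming joda-time on the classpath; the demo class and the
chosen instant are illustrative):

    import org.apache.phoenix.cache.JodaTimezoneCache;
    import org.joda.time.DateTimeZone;

    public class TimezoneCacheDemo {
        public static void main(String[] args) {
            // First lookup loads and caches the zone; subsequent lookups are
            // cache hits, which is where CONVERT_TZ gets its speedup.
            DateTimeZone adak = JodaTimezoneCache.getInstance("America/Adak");
            long utc = 1393632000000L;                 // Sat, 01 Mar 2014 00:00:00 UTC
            long local = adak.convertUTCToLocal(utc);  // 1393596000000L, ten hours earlier
            System.out.println(local);
        }
    }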

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fcedbe6a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
new file mode 100644
index 0000000..f388703
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import java.nio.ByteBuffer;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.joda.time.DateTimeZone;
+import static org.junit.Assert.assertTrue;
+import org.junit.Test;
+
+public class JodaTimezoneCacheTest {
+
+    @Test
+    public void testGetInstanceByteBufferUTC() {
+        DateTimeZone instance = JodaTimezoneCache.getInstance(ByteBuffer.wrap(Bytes.toBytes("UTC")));
+        assertTrue(instance instanceof DateTimeZone);
+    }
+
+    @Test
+    public void testGetInstanceString() {
+        DateTimeZone instance = JodaTimezoneCache.getInstance("America/St_Vincent");
+        assertTrue(instance instanceof DateTimeZone);
+    }
+
+    @Test(expected = IllegalDataException.class)
+    public void testGetInstanceStringUnknown() {
+        JodaTimezoneCache.getInstance("SOME_UNKNOWN_TIMEZONE");
+    }
+
+    @Test
+    public void testGetInstanceImmutableBytesWritable() {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("Europe/Isle_of_Man"));
+        DateTimeZone instance = JodaTimezoneCache.getInstance(ptr);
+        assertTrue(instance instanceof DateTimeZone);
+    }
+}


[16/31] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
deleted file mode 100644
index 7d08c0d..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.junit.Test;
-
-public class PhoenixIndexRpcSchedulerFactoryTest {
-
-    @Test
-    public void ensureInstantiation() throws Exception {
-        Configuration conf = new Configuration(false);
-        conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-            PhoenixIndexRpcSchedulerFactory.class, RpcSchedulerFactory.class);
-        // kinda lame that we copy the copy from the regionserver to do this and can't use a static
-        // method, but meh
-        try {
-            Class<?> rpcSchedulerFactoryClass =
-                    conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                        SimpleRpcSchedulerFactory.class);
-            Object o = rpcSchedulerFactoryClass.newInstance();
-            assertTrue(o instanceof PhoenixIndexRpcSchedulerFactory);
-        } catch (InstantiationException e) {
-            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
-                false);
-        } catch (IllegalAccessException e) {
-            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
-                false);
-        }
-    }
-
-    /**
-     * Ensure that we can't configure the index priority ranges inside the hbase ranges
-     * @throws Exception
-     */
-    @Test
-    public void testValidateIndexPriorityRanges() throws Exception {
-        Configuration conf = new Configuration(false);
-        // standard configs should be fine
-        PhoenixIndexRpcSchedulerFactory factory = new PhoenixIndexRpcSchedulerFactory();
-        factory.create(conf, null);
-
-        setMinMax(conf, 0, 4);
-        factory.create(conf, null);
-
-        setMinMax(conf, 201, 202);
-        factory.create(conf, null);
-
-        setMinMax(conf, 102, 101);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed max less than min");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 5, 6);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed min in range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 6, 60);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed min/max in hbase range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 6, 101);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed in range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-    }
-
-    private void setMinMax(Configuration conf, int min, int max) {
-        conf.setInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, min);
-        conf.setInt(QueryServices.MAX_INDEX_PRIOIRTY_ATTRIB, max);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
new file mode 100644
index 0000000..eb28c8d
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.junit.Test;
+
+public class PhoenixRpcSchedulerFactoryTest {
+
+    @Test
+    public void ensureInstantiation() throws Exception {
+        Configuration conf = new Configuration(false);
+        conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+            PhoenixRpcSchedulerFactory.class, RpcSchedulerFactory.class);
+        // kinda lame that we copy the copy from the regionserver to do this and can't use a static
+        // method, but meh
+        try {
+            Class<?> rpcSchedulerFactoryClass =
+                    conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                        SimpleRpcSchedulerFactory.class);
+            Object o = rpcSchedulerFactoryClass.newInstance();
+            assertTrue(o instanceof PhoenixRpcSchedulerFactory);
+        } catch (InstantiationException e) {
+            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
+                false);
+        } catch (IllegalAccessException e) {
+            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
+                false);
+        }
+    }
+
+    /**
+     * Ensure that we can't configure the index and metadata priority ranges inside the hbase ranges
+     * @throws Exception
+     */
+    @Test
+    public void testValidateRpcPriorityRanges() throws Exception {
+        Configuration conf = new Configuration(false);
+        // standard configs should be fine
+        PhoenixRpcSchedulerFactory factory = new PhoenixRpcSchedulerFactory();
+        factory.create(conf, null);
+
+        // test priorities less than HBase range
+        setPriorities(conf, -4, -1);
+        factory.create(conf, null);
+
+        // test priorities greater than HBase range
+        setPriorities(conf, 1001, 1002);
+        factory.create(conf, null);
+
+        // test priorities in HBase range
+        setPriorities(conf, 1, 201);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, 1001, 1);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        
+        // test priorities in HBase range
+        setPriorities(conf, 1001, HConstants.NORMAL_QOS);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, HConstants.NORMAL_QOS, 1001);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        
+        // test priorities in HBase range
+        setPriorities(conf, 1001, HConstants.HIGH_QOS);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, HConstants.HIGH_QOS, 1001);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+    }
+
+    private void setPriorities(Configuration conf, int indexPrioritymin, int metadataPriority) {
+        conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, indexPrioritymin);
+        conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, metadataPriority);
+    }
+}
\ No newline at end of file
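
In configuration terms, a valid setup keeps both Phoenix priorities outside
HBase's reserved QOS band, as the test above enforces. A hedged sketch using the
defaults introduced in this change (the demo class is illustrative; the constants
and the two-argument create() are the ones exercised by the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
    import org.apache.phoenix.query.QueryServices;

    public class PriorityConfigDemo {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            // 1000/2000 are DEFAULT_INDEX_PRIORITY and DEFAULT_METADATA_PRIORITY,
            // both above HBase's HIGH_QOS, so they never collide with HBase traffic.
            conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, 1000);      // (sic: constant spelling)
            conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, 2000);
            // The factory validates the ranges, as testValidateRpcPriorityRanges shows.
            new PhoenixRpcSchedulerFactory().create(conf, null);
        }
    }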

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index e4ec56a..748ad19 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -457,7 +457,7 @@ public abstract class BaseTest {
     }
     
     private static final String ORG_ID = "00D300000000XHP";
-    private static final int NUM_SLAVES_BASE = 1;
+    protected static int NUM_SLAVES_BASE = 1;
     
     protected static String getZKClientPort(Configuration conf) {
         return conf.get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
@@ -531,9 +531,13 @@ public abstract class BaseTest {
     }
             
     protected static void setUpTestDriver(ReadOnlyProps props) throws Exception {
-        String url = checkClusterInitialized(props);
+        setUpTestDriver(props, props);
+    }
+    
+    protected static void setUpTestDriver(ReadOnlyProps serverProps, ReadOnlyProps clientProps) throws Exception {
+        String url = checkClusterInitialized(serverProps);
         if (driver == null) {
-            driver = initAndRegisterDriver(url, props);
+            driver = initAndRegisterDriver(url, clientProps);
         }
     }
 
@@ -557,7 +561,7 @@ public abstract class BaseTest {
         setUpConfigForMiniCluster(conf, overrideProps);
         utility = new HBaseTestingUtility(conf);
         try {
-            utility.startMiniCluster();
+            utility.startMiniCluster(NUM_SLAVES_BASE);
             // add shutdown hook to kill the mini cluster
             Runtime.getRuntime().addShutdownHook(new Thread() {
                 @Override


[19/31] phoenix git commit: PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)

Posted by ap...@apache.org.
PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4248be3d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4248be3d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4248be3d

Branch: refs/heads/4.x-HBase-1.x
Commit: 4248be3d8a5a3efeb0e103eadac4594fe5de9519
Parents: 709d867
Author: Thomas <td...@salesforce.com>
Authored: Fri Mar 27 15:17:21 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Fri Mar 27 15:17:21 2015 -0700

----------------------------------------------------------------------
 .../end2end/ConvertTimezoneFunctionIT.java      | 24 ++++++++++++-
 .../function/ConvertTimezoneFunction.java       | 38 +++++---------------
 .../function/TimezoneOffsetFunction.java        | 25 +++----------
 pom.xml                                         |  2 +-
 4 files changed, 38 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
index d89a03b..f415dc6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
@@ -23,8 +23,10 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
+import static org.junit.Assert.assertFalse;
 import org.junit.Test;
 
 /**
@@ -129,7 +131,7 @@ public class ConvertTimezoneFunctionIT extends BaseHBaseManagedTimeIT {
         try {
             ResultSet rs = conn.createStatement().executeQuery(
                     "SELECT k1, dates, CONVERT_TZ(dates, 'UNKNOWN_TIMEZONE', 'America/Adak') FROM TIMEZONE_OFFSET_TEST");
-    
+
             rs.next();
             rs.getDate(3).getTime();
             fail();
@@ -137,4 +139,24 @@ public class ConvertTimezoneFunctionIT extends BaseHBaseManagedTimeIT {
             assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode());
         }
     }
+
+	@Test
+	public void testConvertMultipleRecords() throws Exception {
+		Connection conn = DriverManager.getConnection(getUrl());
+		String ddl = "CREATE TABLE IF NOT EXISTS TIMEZONE_OFFSET_TEST (k1 INTEGER NOT NULL, dates DATE CONSTRAINT pk PRIMARY KEY (k1))";
+		Statement stmt = conn.createStatement();
+		stmt.execute(ddl);
+		stmt.execute("UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) VALUES (1, TO_DATE('2014-03-01 00:00:00'))");
+		stmt.execute("UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) VALUES (2, TO_DATE('2014-03-01 00:00:00'))");
+		conn.commit();
+
+		ResultSet rs = stmt.executeQuery(
+				"SELECT k1, dates, CONVERT_TZ(dates, 'UTC', 'America/Adak') FROM TIMEZONE_OFFSET_TEST");
+
+		assertTrue(rs.next());
+		assertEquals(1393596000000L, rs.getDate(3).getTime()); //Fri, 28 Feb 2014 14:00:00
+		assertTrue(rs.next());
+		assertEquals(1393596000000L, rs.getDate(3).getTime()); //Fri, 28 Feb 2014 14:00:00
+		assertFalse(rs.next());
+	}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
index dcde31f..3ea47a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
@@ -15,21 +15,17 @@
  */
 package org.apache.phoenix.expression.function;
 
-import java.sql.Date;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.cache.JodaTimezoneCache;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.joda.time.DateTimeZone;
 
 /**
 * Built-in function CONVERT_TZ(date, 'timezone_from', 'timezone_to'). Converts a date from one timezone to
@@ -43,7 +39,6 @@ import org.apache.phoenix.schema.tuple.Tuple;
 public class ConvertTimezoneFunction extends ScalarFunction {
 
     public static final String NAME = "CONVERT_TZ";
-    private final Map<String, TimeZone> cachedTimeZones = new HashMap<String, TimeZone>();
 
     public ConvertTimezoneFunction() {
     }
@@ -62,40 +57,25 @@ public class ConvertTimezoneFunction extends ScalarFunction {
         if (!children.get(0).evaluate(tuple, ptr)) {
             return false;
         }
-
-        Date dateo = (Date) PDate.INSTANCE.toObject(ptr, children.get(0).getSortOrder());
-        Long date = dateo.getTime();
+        long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(0).getSortOrder());
 
         if (!children.get(1).evaluate(tuple, ptr)) {
             return false;
         }
-        TimeZone timezoneFrom = getTimezoneFromCache(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
+        DateTimeZone timezoneFrom = JodaTimezoneCache.getInstance(ptr);
 
         if (!children.get(2).evaluate(tuple, ptr)) {
             return false;
         }
-        TimeZone timezoneTo = TimeZone.getTimeZone(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
-
-        long dateInUtc = date - timezoneFrom.getOffset(date);
-        long dateInTo = dateInUtc + timezoneTo.getOffset(dateInUtc);
-
-        ptr.set(PDate.INSTANCE.toBytes(new Date(dateInTo)));
+        DateTimeZone timezoneTo = JodaTimezoneCache.getInstance(ptr);
 
+        long convertedDate = date - timezoneFrom.getOffset(date) + timezoneTo.getOffset(date);
+        byte[] outBytes = new byte[8];
+        PDate.INSTANCE.getCodec().encodeLong(convertedDate, outBytes, 0);
+        ptr.set(outBytes);
         return true;
     }
 
-    private TimeZone getTimezoneFromCache(String timezone) throws IllegalDataException {
-        if (!cachedTimeZones.containsKey(timezone)) {
-            TimeZone tz = TimeZone.getTimeZone(timezone);
-            if (!tz.getID().equals(timezone)) {
-                throw new IllegalDataException("Invalid timezone " + timezone);
-            }
-            cachedTimeZones.put(timezone, tz);
-            return tz;
-        }
-        return cachedTimeZones.get(timezone);
-    }
-
     @Override
     public PDataType getDataType() {
         return PDate.INSTANCE;
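
The rewritten evaluate() avoids the per-row java.sql.Date allocation and TimeZone lookup of the old code: it decodes the epoch millis directly, resolves cached Joda DateTimeZone instances, and converts with plain long arithmetic. The same arithmetic in isolation (values taken from the new IT above):

    import org.joda.time.DateTimeZone;

    public class ConvertTzSketch {
        // Same arithmetic as the new evaluate(): subtract the source zone's
        // offset at the given instant, then add the target zone's offset.
        static long convertTz(long epochMillis, DateTimeZone from, DateTimeZone to) {
            return epochMillis - from.getOffset(epochMillis) + to.getOffset(epochMillis);
        }

        public static void main(String[] args) {
            long utc = 1393632000000L; // 2014-03-01 00:00:00 UTC
            long adak = convertTz(utc, DateTimeZone.UTC,
                    DateTimeZone.forID("America/Adak"));
            System.out.println(adak); // 1393596000000, matching the IT assertion
        }
    }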

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
index 2cfbc25..8c70346 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
@@ -18,22 +18,18 @@
 
 package org.apache.phoenix.expression.function;
 
-import java.sql.Date;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.cache.JodaTimezoneCache;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.joda.time.DateTimeZone;
 
 /**
 * Returns the offset (shift in minutes) of a timezone at a particular datetime.
@@ -45,7 +41,6 @@ public class TimezoneOffsetFunction extends ScalarFunction {
 
     public static final String NAME = "TIMEZONE_OFFSET";
     private static final int MILLIS_TO_MINUTES = 60 * 1000;
-    private final Map<String, TimeZone> cachedTimeZones = new HashMap<String, TimeZone>();
 
     public TimezoneOffsetFunction() {
     }
@@ -64,24 +59,14 @@ public class TimezoneOffsetFunction extends ScalarFunction {
         if (!children.get(0).evaluate(tuple, ptr)) {
             return false;
         }
-
-        String timezone = Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength());
+        DateTimeZone timezoneInstance = JodaTimezoneCache.getInstance(ptr);
 
         if (!children.get(1).evaluate(tuple, ptr)) {
             return false;
         }
+        long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(1).getSortOrder());
 
-        if (!cachedTimeZones.containsKey(timezone)) {
-            TimeZone tz = TimeZone.getTimeZone(timezone);
-            if (!tz.getID().equals(timezone)) {
-                throw new IllegalDataException("Invalid timezone " + timezone);
-            }
-            cachedTimeZones.put(timezone, tz);
-        }
-
-		Date date = (Date) PDate.INSTANCE.toObject(ptr, children.get(1).getSortOrder());
-		int offset = cachedTimeZones.get(timezone).getOffset(date.getTime());
-
+        int offset = timezoneInstance.getOffset(date);
         ptr.set(PInteger.INSTANCE.toBytes(offset / MILLIS_TO_MINUTES));
         return true;
     }
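
TIMEZONE_OFFSET gets the same treatment: one cached DateTimeZone lookup plus integer arithmetic, with Joda's millisecond offset divided down to minutes. In isolation (the timezone id and timestamp are illustrative):

    import org.joda.time.DateTimeZone;

    public class TimezoneOffsetSketch {
        private static final int MILLIS_TO_MINUTES = 60 * 1000;

        // Joda's getOffset() returns millis; TIMEZONE_OFFSET reports whole minutes.
        static int offsetMinutes(String tzId, long epochMillis) {
            return DateTimeZone.forID(tzId).getOffset(epochMillis) / MILLIS_TO_MINUTES;
        }

        public static void main(String[] args) {
            // Europe/Prague is UTC+1 outside DST, so this prints 60.
            System.out.println(offsetMinutes("Europe/Prague", 1393632000000L));
        }
    }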

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0e656e7..63e5735 100644
--- a/pom.xml
+++ b/pom.xml
@@ -103,7 +103,7 @@
     <commons-codec.version>1.7</commons-codec.version>
     <htrace.version>3.1.0-incubating</htrace.version>
     <collections.version>3.2.1</collections.version>
-    <jodatime.version>2.3</jodatime.version>
+    <jodatime.version>2.7</jodatime.version>
 
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>
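
Both functions now delegate timezone resolution to JodaTimezoneCache.getInstance(ptr), whose source is not part of these hunks. A hypothetical sketch of what such a cache could look like (the class body, map choice, and error behavior are assumptions; only the name and the caching idea come from the commit):

    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.joda.time.DateTimeZone;

    public final class JodaTimezoneCacheSketch {
        private static final ConcurrentHashMap<String, DateTimeZone> CACHE =
                new ConcurrentHashMap<String, DateTimeZone>();

        // Decode the timezone id from the raw pointer, then reuse the parsed
        // DateTimeZone. DateTimeZone.forID throws IllegalArgumentException for
        // unknown ids, which the function layer can surface as ILLEGAL_DATA.
        public static DateTimeZone getInstance(ImmutableBytesWritable ptr) {
            String id = Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength());
            DateTimeZone tz = CACHE.get(id);
            if (tz == null) {
                tz = DateTimeZone.forID(id);
                CACHE.putIfAbsent(id, tz);
            }
            return tz;
        }

        private JodaTimezoneCacheSketch() {}
    }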


[03/31] phoenix git commit: PHOENIX-1642 Make Phoenix Master Branch pointing to HBase1.0.0

Posted by ap...@apache.org.
PHOENIX-1642 Make Phoenix Master Branch pointing to HBase1.0.0

Conflicts:
	phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a29e163f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a29e163f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a29e163f

Branch: refs/heads/4.x-HBase-1.x
Commit: a29e163fcdf3ec06c98de423bfd34861af227307
Parents: 03fce01
Author: Enis Soztutar <en...@apache.org>
Authored: Thu Mar 19 12:07:16 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu Mar 19 13:37:21 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/src/build/client.xml           |   4 +-
 phoenix-core/pom.xml                            |   4 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  34 ++--
 .../phoenix/end2end/index/LocalIndexIT.java     |  32 ++--
 .../end2end/index/MutableIndexFailureIT.java    |   6 +-
 .../index/balancer/IndexLoadBalancerIT.java     |   6 +-
 .../phoenix/trace/PhoenixTraceReaderIT.java     |   2 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |  20 +-
 .../regionserver/IndexHalfStoreFileReader.java  |  41 ++++-
 .../IndexHalfStoreFileReaderGenerator.java      |  14 +-
 .../regionserver/IndexSplitTransaction.java     |  28 +--
 .../hbase/regionserver/KeyValueSkipListSet.java | 183 +++++++++++++++++++
 .../hbase/regionserver/LocalIndexMerger.java    |   4 +-
 .../hbase/regionserver/LocalIndexSplitter.java  |  29 +--
 .../apache/phoenix/compile/TraceQueryPlan.java  |  14 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   4 +-
 .../apache/phoenix/execute/BaseQueryPlan.java   |   2 +-
 .../apache/phoenix/execute/MutationState.java   |   4 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  10 +-
 .../hbase/index/balancer/IndexLoadBalancer.java |   5 +
 .../hbase/index/covered/data/IndexMemStore.java |  27 +--
 .../index/covered/data/LazyValueGetter.java     |   5 +-
 .../example/CoveredColumnIndexCodec.java        |   6 +-
 .../filter/ApplyAndFilterDeletesFilter.java     |   8 +-
 .../index/covered/update/ColumnReference.java   |  10 +-
 .../ipc/PhoenixIndexRpcSchedulerFactory.java    |  19 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  17 +-
 .../phoenix/hbase/index/scanner/Scanner.java    |   5 +-
 .../hbase/index/scanner/ScannerBuilder.java     |  10 +-
 .../hbase/index/wal/IndexedKeyValue.java        |  17 --
 .../apache/phoenix/jdbc/PhoenixConnection.java  |   4 +-
 .../apache/phoenix/trace/TraceMetricSource.java |  15 +-
 .../org/apache/phoenix/trace/TraceReader.java   |   2 +-
 .../apache/phoenix/trace/TracingIterator.java   |   2 +-
 .../org/apache/phoenix/trace/TracingUtils.java  |   2 +-
 .../org/apache/phoenix/trace/util/NullSpan.java |  10 +-
 .../org/apache/phoenix/trace/util/Tracing.java  |  51 ++++--
 .../java/org/apache/phoenix/util/IndexUtil.java |  12 +-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |   4 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java    |   6 +-
 .../index/covered/TestLocalTableState.java      |   8 +-
 .../index/covered/data/TestIndexMemStore.java   |   5 +-
 .../index/write/TestWALRecoveryCaching.java     |  14 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |  15 +-
 .../phoenix/trace/TraceMetricsSourceTest.java   |   4 +-
 phoenix-flume/pom.xml                           |   4 +-
 pom.xml                                         |  14 +-
 47 files changed, 481 insertions(+), 261 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-assembly/src/build/client.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/client.xml b/phoenix-assembly/src/build/client.xml
index f674331..101ccd6 100644
--- a/phoenix-assembly/src/build/client.xml
+++ b/phoenix-assembly/src/build/client.xml
@@ -46,8 +46,8 @@
         <include>jline:jline</include>
         <include>sqlline:sqlline</include>
         <include>org.apache.hbase:hbase*</include>
-        <include>org.cloudera.htrace:htrace-core</include>
-        <include>io.netty:netty</include>
+        <include>org.apache.htrace:htrace-core</include>
+        <include>io.netty:netty-all</include>
         <include>commons-codec:commons-codec</include>
       </includes>
     </dependencySet>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index a325b27..d4dc2e2 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -318,12 +318,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.cloudera.htrace</groupId>
+      <groupId>org.apache.htrace</groupId>
       <artifactId>htrace-core</artifactId>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
+      <artifactId>netty-all</artifactId>
     </dependency>
     <dependency>
       <groupId>commons-codec</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 8cf8a8a..3b8ff29 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.TableName;
@@ -65,7 +68,7 @@ import org.mockito.Mockito;
 
 /**
  * For pre-0.94.9 instances, this class tests correctly deserializing WALEdits w/o compression. Post
- * 0.94.9 we can support a custom {@link WALEditCodec}, which handles reading/writing the compressed
+ * 0.94.9 we can support a custom {@link WALCellCodec}, which handles reading/writing the compressed
  * edits.
  * <p>
  * Most of the underlying work (creating/splitting the WAL, etc) is from
@@ -93,13 +96,12 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   @Before
   public void setUp() throws Exception {
     setupCluster();
+    Path hbaseRootDir = UTIL.getDataTestDir();
     this.conf = HBaseConfiguration.create(UTIL.getConfiguration());
     this.fs = UTIL.getDFSCluster().getFileSystem();
     this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
     this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
     this.logDir = new Path(this.hbaseRootDir, HConstants.HREGION_LOGDIR_NAME);
-    // reset the log reader to ensure we pull the one from this config
-    HLogFactory.resetLogReaderClass();
   }
 
   private void setupCluster() throws Exception {
@@ -133,11 +135,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   protected void startCluster() throws Exception {
     UTIL.startMiniDFSCluster(3);
     UTIL.startMiniZKCluster();
-    UTIL.startMiniHBaseCluster(1, 1);
 
     Path hbaseRootDir = UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
     LOG.info("hbase.rootdir=" + hbaseRootDir);
     UTIL.getConfiguration().set(HConstants.HBASE_DIR, hbaseRootDir.toString());
+    UTIL.startMiniHBaseCluster(1, 1);
   }
 
   @After
@@ -183,8 +185,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     // create the region + its WAL
     HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
     region0.close();
-    region0.getLog().closeAndDelete();
-    HLog wal = createWAL(this.conf);
+    region0.getWAL().close();
+
+    WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");
+
+    WAL wal = createWAL(this.conf, walFactory);
     RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
     // mock out some of the internals of the RSS, so we can run CPs
     Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
@@ -206,15 +211,13 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     // we should then see the server go down
     Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
       Mockito.any(Exception.class));
-    region.close(true);
-    wal.close();
 
     // then create the index table so we are successful on WAL replay
     CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
 
     // run the WAL split and setup the region
-    runWALSplit(this.conf);
-    HLog wal2 = createWAL(this.conf);
+    runWALSplit(this.conf, walFactory);
+    WAL wal2 = createWAL(this.conf, walFactory);
     HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);
 
     // initialize the region - this should replay the WALEdits from the WAL
@@ -257,8 +260,9 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
    * @return WAL with retries set down from 5 to 1 only.
    * @throws IOException
    */
-  private HLog createWAL(final Configuration c) throws IOException {
-    HLog wal = HLogFactory.createHLog(FileSystem.get(c), logDir, "localhost,1234", c);
+  private WAL createWAL(final Configuration c, WALFactory walFactory) throws IOException {
+    WAL wal = walFactory.getWAL(new byte[]{});
+
     // Set down maximum recovery so the dfsclient doesn't linger retrying something
     // long gone.
     HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
@@ -271,11 +275,11 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
    * @return The single split file made
    * @throws IOException
    */
-  private Path runWALSplit(final Configuration c) throws IOException {
+  private Path runWALSplit(final Configuration c, WALFactory walFactory) throws IOException {
     FileSystem fs = FileSystem.get(c);
     
-    List<Path> splits = HLogSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
-        this.oldLogDir, fs, c);
+    List<Path> splits = WALSplitter.split(this.hbaseRootDir, new Path(this.logDir, "localhost,1234"),
+        this.oldLogDir, fs, c, walFactory);
     // Split should generate only 1 file since there's only 1 region
     assertEquals("splits=" + splits, 1, splits.size());
     // Make sure the file exists
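
The recurring change in this WAL test is the HBase 1.0 API migration: HLog, HLogFactory, and HLogSplitter are replaced by WAL, WALFactory, and WALSplitter. Condensed from the hunks above (the helper names are illustrative; the constructor and split signatures are the ones the diff uses):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;
    import org.apache.hadoop.hbase.wal.WALSplitter;

    public class WalMigrationSketch {
        // HBase 1.0 routes WAL creation through a WALFactory instead of the
        // removed HLogFactory statics.
        static WAL openWal(Configuration conf) throws IOException {
            WALFactory factory = new WALFactory(conf, null, "localhost,1234");
            return factory.getWAL(new byte[]{});
        }

        // WALSplitter.split replaces HLogSplitter.split and takes the factory.
        static List<Path> splitLogs(Configuration conf, Path rootDir, Path logDir,
                Path oldLogDir, WALFactory factory) throws IOException {
            FileSystem fs = FileSystem.get(conf);
            return WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, factory);
        }
    }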

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index a7b7655..5e01510 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -37,9 +37,8 @@ import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -727,24 +726,27 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
             
             HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
             for (int i = 1; i < 5; i++) {
-                CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
                 admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
                 List<HRegionInfo> regionsOfUserTable =
-                        MetaReader.getTableRegions(ct, TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
+                                TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
 
                 while (regionsOfUserTable.size() != (4+i)) {
                     Thread.sleep(100);
-                    regionsOfUserTable = MetaReader.getTableRegions(ct, TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                    regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
                 }
                 assertEquals(4+i, regionsOfUserTable.size());
                 TableName indexTable =
                         TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME));
                 List<HRegionInfo> regionsOfIndexTable =
-                        MetaReader.getTableRegions(ct, indexTable, false);
+                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                                admin.getConnection(), indexTable, false);
 
                 while (regionsOfIndexTable.size() != (4 + i)) {
                     Thread.sleep(100);
-                    regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+                    regionsOfIndexTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), indexTable, false);
                 }
                 assertEquals(4 + i, regionsOfIndexTable.size());
                 String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME;
@@ -847,32 +849,32 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
             assertTrue(rs.next());
 
             HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
-            CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
             List<HRegionInfo> regionsOfUserTable =
-                    MetaReader.getTableRegions(ct,
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
                         TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
             admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
                 regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
             regionsOfUserTable =
-                    MetaReader.getTableRegions(ct,
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
                         TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
 
             while (regionsOfUserTable.size() != 3) {
                 Thread.sleep(100);
-                regionsOfUserTable =
-                        MetaReader.getTableRegions(ct,
-                            TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+                regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                        admin.getConnection(), TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
             }
             assertEquals(3, regionsOfUserTable.size());
             TableName indexTable =
                     TableName.valueOf(MetaDataUtil
                             .getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME));
             List<HRegionInfo> regionsOfIndexTable =
-                    MetaReader.getTableRegions(ct, indexTable, false);
+                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
+                            admin.getConnection(), indexTable, false);
 
             while (regionsOfIndexTable.size() != 3) {
                 Thread.sleep(100);
-                regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+                regionsOfIndexTable = MetaTableAccessor.getTableRegions(
+                        getUtility().getZooKeeperWatcher(), admin.getConnection(), indexTable, false);
             }
             assertEquals(3, regionsOfIndexTable.size());
             String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME;
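
The other migration repeated across these tests is CatalogTracker/MetaReader to MetaTableAccessor: meta lookups now take a ZooKeeperWatcher plus a client Connection instead of a CatalogTracker. The pattern from the hunks above, condensed (the helper name is illustrative):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    public class MetaLookupSketch {
        // MetaReader.getTableRegions(catalogTracker, table, false) becomes:
        static List<HRegionInfo> tableRegions(ZooKeeperWatcher zkw,
                Connection connection, String tableName) throws IOException {
            return MetaTableAccessor.getTableRegions(
                    zkw, connection, TableName.valueOf(tableName), false);
        }
    }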

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index d11c059..dfc7ffb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -304,7 +304,8 @@ public class MutableIndexFailureIT extends BaseTest {
         Collection<ServerName> rss = cluster.getClusterStatus().getServers();
         HBaseAdmin admin = this.util.getHBaseAdmin();
         List<HRegionInfo> regions = admin.getTableRegions(catalogTable);
-        ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getRegionName());
+        ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getTable(),
+                regions.get(0).getRegionName());
         ServerName metaRS = cluster.getServerHoldingMeta();
         ServerName rsToBeKilled = null;
         
@@ -324,7 +325,8 @@ public class MutableIndexFailureIT extends BaseTest {
         this.util.waitFor(30000, 200, new Waiter.Predicate<Exception>() {
             @Override
             public boolean evaluate() throws Exception {
-              ServerName sn = cluster.getServerHoldingRegion(indexRegion.getRegionName());
+              ServerName sn = cluster.getServerHoldingRegion(indexRegion.getTable(),
+                      indexRegion.getRegionName());
               return (sn != null && sn.equals(dstRS));
             }
           });

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
index d534b6a..449dccf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancerIT.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -447,8 +447,8 @@ public class IndexLoadBalancerIT {
             throws IOException, InterruptedException {
 
         List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
-                MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(), TableName
-                        .valueOf(tableName));
+                MetaTableAccessor.getTableRegionsAndLocations(master.getZooKeeper(), master.getConnection(),
+                        TableName.valueOf(tableName));
         List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
                 new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
         Pair<byte[], ServerName> startKeyAndLocation = null;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
index 1308c13..2315074 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 import org.junit.Test;
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 05d9e41..8febfff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -35,18 +35,18 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.apache.htrace.impl.ProbabilitySampler;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.metrics.Metrics;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.impl.ProbabilitySampler;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -349,7 +349,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         });
         assertTrue("Didn't find the parallel scanner in the tracing", found);
     }
-    
+
     @Test
     public void testCustomAnnotationTracing() throws Exception {
     	final String customAnnotationKey = "myannot";
@@ -375,7 +375,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         stmt.execute();
         conn.commit();
         conn.rollback();
-        
+
         // setup for next set of updates
         stmt.setString(1, "key2");
         stmt.setLong(2, 2);
@@ -456,10 +456,10 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
             	return currentTrace.toString().contains(annotationKey + " - " + annotationValue);
             }
         });
-        
+
         assertTrue("Didn't find the custom annotation in the tracing", tracingComplete);
     }
-    
+
     private boolean checkStoredTraces(Connection conn, TraceChecker checker) throws Exception {
         TraceReader reader = new TraceReader(conn);
         int retries = 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 172486d..654daf0 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -25,11 +25,13 @@ import java.util.Map.Entry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -159,7 +161,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return getChangedKey(delegate.getKeyValue(), changeBottomKeys);
             }
             
-            private ByteBuffer getChangedKey(KeyValue kv, boolean changeBottomKeys) {
+            private ByteBuffer getChangedKey(Cell kv, boolean changeBottomKeys) {
                 // new KeyValue(row, family, qualifier, timestamp, type, value)
                 byte[] newRowkey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(kv, changeBottomKeys);
                 KeyValue newKv =
@@ -171,7 +173,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return keyBuffer;
             }
 
-            private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(KeyValue kv, boolean changeBottomKeys) {
+            private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(Cell kv, boolean changeBottomKeys) {
                 int lenOfRemainingKey = kv.getRowLength() - offset;
                 byte[] keyReplacedStartKey = new byte[lenOfRemainingKey + splitRow.length];
                 System.arraycopy(changeBottomKeys ? new byte[splitRow.length] : splitRow, 0,
@@ -202,11 +204,11 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getValue());
             }
 
-            public KeyValue getKeyValue() {
+            public Cell getKeyValue() {
                 if (atEnd) {
                     return null;
                 }
-                KeyValue kv = delegate.getKeyValue();
+                Cell kv = delegate.getKeyValue();
                 boolean changeBottomKeys =
                         regionInfo.getStartKey().length == 0 && splitRow.length != offset;
                 if (!top) {
@@ -221,7 +223,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                                 kv.getQualifierOffset(), kv.getQualifierLength(),
                                 kv.getTimestamp(), Type.codeToType(kv.getTypeByte()),
                                 kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(),
-                                kv.getTags());
+                                kv.getTagsArray(), kv.getTagsOffset(), kv.getTagsLength());
                 return changedKv;
             }
 
@@ -251,6 +253,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             }
 
             public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
+
                 if (top) {
                     byte[] fk = getFirstKey();
                     // This will be null when the file is empty in which we can not seekBefore to
@@ -262,8 +265,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                         return false;
                     }
                     KeyValue replacedKey = getKeyPresentInHFiles(key);
-                    return this.delegate.seekBefore(replacedKey.getBuffer(),
-                        replacedKey.getKeyOffset(), replacedKey.getKeyLength());
+                    return this.delegate.seekBefore(replacedKey);
                 } else {
                     // The equals sign isn't strictly necessary just here to be consistent with
                     // seekTo
@@ -274,6 +276,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return this.delegate.seekBefore(key, offset, length);
             }
 
+            @Override
+            public boolean seekBefore(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return seekBefore(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public boolean seekTo() throws IOException {
                 boolean b = delegate.seekTo();
                 if (!b) {
@@ -328,6 +336,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.seekTo(key, offset, length);
             }
 
+            @Override
+            public int seekTo(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return seekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public int reseekTo(byte[] key) throws IOException {
                 return reseekTo(key, 0, key.length);
             }
@@ -355,6 +369,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.reseekTo(key, offset, length);
             }
 
+            @Override
+            public int reseekTo(Cell cell) throws IOException {
+                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                return reseekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
+            }
+
             public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
                 return this.delegate.getReader();
             }
@@ -373,7 +393,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         };
     }
 
-    private boolean isSatisfiedMidKeyCondition(KeyValue kv) {
+    private boolean isSatisfiedMidKeyCondition(Cell kv) {
         if (CellUtil.isDelete(kv) && kv.getValueLength() == 0) {
             // In case of a Delete type KV, let it go to both the daughter regions.
             // No problem in doing so. In the correct daughter region where it belongs, this delete
@@ -428,9 +448,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 && keyValue.getTimestamp() == HConstants.LATEST_TIMESTAMP
                 && Bytes.compareTo(keyValue.getRowArray(), keyValue.getRowOffset(),
                     keyValue.getRowLength(), splitRow, 0, splitRow.length) == 0
-                && keyValue.isDeleteFamily()) {
+                && CellUtil.isDeleteFamily(keyValue)) {
             KeyValue createFirstDeleteFamilyOnRow =
-                    KeyValue.createFirstDeleteFamilyOnRow(regionStartKeyInHFile, keyValue.getFamily());
+                    KeyValueUtil.createFirstDeleteFamilyOnRow(regionStartKeyInHFile,
+                            keyValue.getFamily());
             return createFirstDeleteFamilyOnRow;
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 718f820..1284dcf 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -80,7 +80,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         HRegionInfo childRegion = region.getRegionInfo();
         byte[] splitKey = null;
         if (reader == null && r != null) {
-            Scan scan = MetaReader.getScanForTableName(tableName);
+            Scan scan = MetaTableAccessor.getScanForTableName(tableName);
             SingleColumnValueFilter scvf = null;
             if (Reference.isTopFileRegion(r.getFileRegion())) {
                 scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
@@ -107,8 +107,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                 }
                 if (result == null || result.isEmpty()) {
                     Pair<HRegionInfo, HRegionInfo> mergeRegions =
-                            MetaReader.getRegionsFromMergeQualifier(ctx.getEnvironment()
-                                    .getRegionServerServices().getCatalogTracker(),
+                            MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment()
+                                    .getRegionServerServices().getConnection(),
                                 region.getRegionName());
                     if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
                     byte[] splitRow =
@@ -121,10 +121,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         childRegion = mergeRegions.getSecond();
                         regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                     }
-                    splitKey =
-                            KeyValue.createFirstOnRow(
-                                region.getStartKey().length == 0 ? new byte[region.getEndKey().length] : region
-                                        .getStartKey()).getKey();
+                    splitKey = KeyValue.createFirstOnRow(region.getStartKey().length == 0 ?
+                            new byte[region.getEndKey().length] : region.getStartKey()).getKey();
                 } else {
                     HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
                     regionStartKeyInHFile =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 048506d..920380b 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -42,11 +42,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -286,11 +286,11 @@ public class IndexSplitTransaction extends SplitTransaction {
     // and assign the parent region.
     if (!testing) {
       if (metaEntries == null || metaEntries.isEmpty()) {
-        MetaEditor.splitRegion(server.getCatalogTracker(),
-            parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
-            daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+        MetaTableAccessor.splitRegion(server.getConnection(), parent.getRegionInfo(),
+                daughterRegions.getFirst().getRegionInfo(),
+                daughterRegions.getSecond().getRegionInfo(), server.getServerName());
       } else {
-        offlineParentInMetaAndputMetaEntries(server.getCatalogTracker(),
+        offlineParentInMetaAndputMetaEntries(server.getConnection(),
           parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
               .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
       }
@@ -415,10 +415,10 @@ public class IndexSplitTransaction extends SplitTransaction {
       if (services != null) {
         try {
           // add 2nd daughter first (see HBASE-4335)
-          services.postOpenDeployTasks(b, server.getCatalogTracker());
+          services.postOpenDeployTasks(b);
           // Should add it to OnlineRegions
           services.addToOnlineRegions(b);
-          services.postOpenDeployTasks(a, server.getCatalogTracker());
+          services.postOpenDeployTasks(a);
           services.addToOnlineRegions(a);
         } catch (KeeperException ke) {
           throw new IOException(ke);
@@ -583,7 +583,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return regions;
   }
 
-  private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
+  private void offlineParentInMetaAndputMetaEntries(Connection conn,
       HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
       ServerName serverName, List<Mutation> metaEntries) throws IOException {
     List<Mutation> mutations = metaEntries;
@@ -592,19 +592,19 @@ public class IndexSplitTransaction extends SplitTransaction {
     copyOfParent.setSplit(true);
 
     //Put for parent
-    Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
-    MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
+    Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+    MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
     mutations.add(putParent);
     
     //Puts for daughters
-    Put putA = MetaEditor.makePutFromRegionInfo(splitA);
-    Put putB = MetaEditor.makePutFromRegionInfo(splitB);
+    Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
+    Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
 
     addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
     addLocation(putB, serverName, 1);
     mutations.add(putA);
     mutations.add(putB);
-    MetaEditor.mutateMetaTable(catalogTracker, mutations);
+    MetaTableAccessor.mutateMetaTable(conn, mutations);
   }
 
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
new file mode 100644
index 0000000..211aa10
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
@@ -0,0 +1,183 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.NavigableSet;
+import java.util.SortedSet;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+/**
+ * A {@link java.util.Set} of {@link KeyValue}s implemented on top of a
+ * {@link java.util.concurrent.ConcurrentSkipListMap}.  Works like a
+ * {@link java.util.concurrent.ConcurrentSkipListSet} in all but one regard:
+ * an add will overwrite any existing entry for the added key.  In other words,
+ * where CSLS "Adds the specified element to this set if it is not already
+ * present", this implementation adds the specified element to this set EVEN
+ * if it is already present, overwriting what was there previously.  The call to
+ * add returns true if there was no value in the backing map, or false if there
+ * was an entry with the same key (though the value may be different).
+ * <p>Otherwise it has the same attributes as ConcurrentSkipListSet: e.g. it is
+ * tolerant of concurrent gets and sets and won't throw
+ * ConcurrentModificationException when iterating.
+ */
+public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
+  private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+
+  KeyValueSkipListSet(final KeyValue.KVComparator c) {
+    this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
+  }
+
+  KeyValueSkipListSet(final ConcurrentNavigableMap<KeyValue, KeyValue> m) {
+    this.delegatee = m;
+  }
+
+  public KeyValue ceiling(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public Iterator<KeyValue> descendingIterator() {
+    return this.delegatee.descendingMap().values().iterator();
+  }
+
+  public NavigableSet<KeyValue> descendingSet() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue floor(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> headSet(final KeyValue toElement) {
+    return headSet(toElement, false);
+  }
+
+  public NavigableSet<KeyValue> headSet(final KeyValue toElement,
+      boolean inclusive) {
+    return new KeyValueSkipListSet(this.delegatee.headMap(toElement, inclusive));
+  }
+
+  public KeyValue higher(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public Iterator<KeyValue> iterator() {
+    return this.delegatee.values().iterator();
+  }
+
+  public KeyValue lower(KeyValue e) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue pollFirst() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue pollLast() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> subSet(KeyValue fromElement, KeyValue toElement) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public NavigableSet<KeyValue> subSet(KeyValue fromElement,
+      boolean fromInclusive, KeyValue toElement, boolean toInclusive) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public SortedSet<KeyValue> tailSet(KeyValue fromElement) {
+    return tailSet(fromElement, true);
+  }
+
+  public NavigableSet<KeyValue> tailSet(KeyValue fromElement, boolean inclusive) {
+    return new KeyValueSkipListSet(this.delegatee.tailMap(fromElement, inclusive));
+  }
+
+  public Comparator<? super KeyValue> comparator() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue first() {
+    return this.delegatee.get(this.delegatee.firstKey());
+  }
+
+  public KeyValue last() {
+    return this.delegatee.get(this.delegatee.lastKey());
+  }
+
+  public boolean add(KeyValue e) {
+    return this.delegatee.put(e, e) == null;
+  }
+
+  public boolean addAll(Collection<? extends KeyValue> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public void clear() {
+    this.delegatee.clear();
+  }
+
+  public boolean contains(Object o) {
+    //noinspection SuspiciousMethodCalls
+    return this.delegatee.containsKey(o);
+  }
+
+  public boolean containsAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public boolean isEmpty() {
+    return this.delegatee.isEmpty();
+  }
+
+  public boolean remove(Object o) {
+    return this.delegatee.remove(o) != null;
+  }
+
+  public boolean removeAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public boolean retainAll(Collection<?> c) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public KeyValue get(KeyValue kv) {
+    return this.delegatee.get(kv);
+  }
+
+  public int size() {
+    return this.delegatee.size();
+  }
+
+  public Object[] toArray() {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  public <T> T[] toArray(T[] a) {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+}
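
A quick usage sketch of the set above: because it delegates to a ConcurrentSkipListMap keyed and valued by the same KeyValue, add() is a put() under the hood, so inserting a comparator-equal KeyValue replaces the stored instance rather than creating a duplicate. Illustrative only (the constructor shown is package-private, so real callers live in the same package):

    // Hypothetical usage, assuming the HBase 1.x KeyValue/Bytes APIs.
    KeyValueSkipListSet kvs = new KeyValueSkipListSet(KeyValue.COMPARATOR);
    byte[] row = Bytes.toBytes("r1");
    byte[] fam = Bytes.toBytes("f");
    byte[] qual = Bytes.toBytes("q");
    kvs.add(new KeyValue(row, fam, qual, 10L, Bytes.toBytes("v1"))); // true: new key
    kvs.add(new KeyValue(row, fam, qual, 10L, Bytes.toBytes("v2"))); // false: replaced in place
    for (KeyValue kv : kvs) {           // ascending comparator order
      System.out.println(kv);
    }
    KeyValue lowest = kvs.first();      // backed by delegatee.firstKey()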

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index 6f8dd79..f074df7 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -23,8 +23,8 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -55,7 +55,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                         .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
             TableName indexTable =
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
-            if (!MetaReader.tableExists(rs.getCatalogTracker(), indexTable)) return;
+            if (!MetaTableAccessor.tableExists(rs.getConnection(), indexTable)) return;
             HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
             if (indexRegionA == null) {
                 LOG.warn("Index region corresponindg to data region " + regionA

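The MetaReader-to-MetaTableAccessor change above is a pattern that recurs throughout this series: HBase 1.x removed the org.apache.hadoop.hbase.catalog classes, and the replacement reads hbase:meta through a client Connection instead of the CatalogTracker. A minimal sketch of the call-site migration, assuming the HBase 1.x API used in the diff:

    import java.io.IOException;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.RegionServerServices;

    final class MetaLookupSketch {
      // 0.98: MetaReader.tableExists(rs.getCatalogTracker(), indexTable)
      // 1.x:  the same check, routed through the server's shared Connection
      static boolean indexTableExists(RegionServerServices rs, TableName indexTable)
          throws IOException {
        return MetaTableAccessor.tableExists(rs.getConnection(), indexTable);
      }
    }
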
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 2ac61cb..9af8251 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -17,17 +17,12 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -39,16 +34,20 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.AlterIndexStatement;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.List;
+
 public class LocalIndexSplitter extends BaseRegionObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexSplitter.class);
@@ -73,7 +72,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                         .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
             TableName indexTable =
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
-            if (!MetaReader.tableExists(rss.getCatalogTracker(), indexTable)) return;
+            if (!MetaTableAccessor.tableExists(rss.getConnection(), indexTable)) return;
 
             HRegion indexRegion = IndexUtil.getIndexRegion(environment);
             if (indexRegion == null) {
@@ -105,14 +104,16 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                 copyOfParent.setOffline(true);
                 copyOfParent.setSplit(true);
                 // Put for parent
-                Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
-                MetaEditor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
-                    daughterRegions.getSecond().getRegionInfo());
+                Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+                MetaTableAccessor.addDaughtersToPut(putParent,
+                        daughterRegions.getFirst().getRegionInfo(),
+                        daughterRegions.getSecond().getRegionInfo());
                 metaEntries.add(putParent);
                 // Puts for daughters
-                Put putA = MetaEditor.makePutFromRegionInfo(daughterRegions.getFirst().getRegionInfo());
-                Put putB =
-                    MetaEditor.makePutFromRegionInfo(daughterRegions.getSecond().getRegionInfo());
+                Put putA = MetaTableAccessor.makePutFromRegionInfo(
+                        daughterRegions.getFirst().getRegionInfo());
+                Put putB = MetaTableAccessor.makePutFromRegionInfo(
+                        daughterRegions.getSecond().getRegionInfo());
                 st.addLocation(putA, rss.getServerName(), 1);
                 st.addLocation(putB, rss.getServerName(), 1);
                 metaEntries.add(putA);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index 815ac1e..3b601b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.TraceScope;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.expression.Determinism;
@@ -58,9 +60,6 @@ import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.SizedUtil;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.impl.ProbabilitySampler;
 
 public class TraceQueryPlan implements QueryPlan {
 
@@ -124,14 +123,9 @@ public class TraceQueryPlan implements QueryPlan {
                 if(!first) return null;
                 TraceScope traceScope = conn.getTraceScope();
                 if (traceStatement.isTraceOn()) {
-                    double samplingRate = traceStatement.getSamplingRate();
-                    if (samplingRate >= 1.0) {
-                        conn.setSampler(Sampler.ALWAYS);
-                    } else if (samplingRate < 1.0 && samplingRate > 0.0) {
-                        conn.setSampler(new ProbabilitySampler(samplingRate));
-                    } else {
+                    conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
+                    if (conn.getSampler() == Sampler.NEVER) {
                         closeTraceScope(conn);
-                        conn.setSampler(Sampler.NEVER);
                     }
                     if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                         traceScope = Tracing.startNewSpan(conn, "Enabling trace");
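
The deleted branch above spells out what the new Tracing.getConfiguredSampler(traceStatement) helper presumably encapsulates. The sketch below reconstructs that selection logic from the removed lines; it is an inference about the helper, not a copy of its body:

    import org.apache.htrace.Sampler;
    import org.apache.htrace.impl.ProbabilitySampler;

    // Reconstructed from the deleted TraceQueryPlan branch: map a sampling
    // rate onto an htrace Sampler.
    static Sampler pickSampler(double samplingRate) {
      if (samplingRate >= 1.0) {
        return Sampler.ALWAYS;                        // trace everything
      } else if (samplingRate > 0.0) {
        return new ProbabilitySampler(samplingRate);  // trace a fraction
      } else {
        return Sampler.NEVER;                         // tracing off
      }
    }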

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 25ac408..c3988a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -53,8 +53,8 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
 
 import com.google.common.collect.ImmutableList;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 94233c8..4ca2dee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -66,7 +66,7 @@ import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index b98d705..467746b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -54,8 +54,8 @@ import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.ServerUtil;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Span;
+import org.apache.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index a4fc96b..1c3d1e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -65,9 +65,9 @@ import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
 import org.apache.phoenix.trace.TracingUtils;
 import org.apache.phoenix.trace.util.NullSpan;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.collect.Multimap;
 
@@ -475,7 +475,7 @@ public class Indexer extends BaseRegionObserver {
    *         present
    */
   private IndexedKeyValue getFirstIndexedKeyValue(WALEdit edit) {
-    for (KeyValue kv : edit.getKeyValues()) {
+    for (Cell kv : edit.getCells()) {
       if (kv instanceof IndexedKeyValue) {
         return (IndexedKeyValue) kv;
       }
@@ -490,7 +490,7 @@ public class Indexer extends BaseRegionObserver {
    */
   private Collection<Pair<Mutation, byte[]>> extractIndexUpdate(WALEdit edit) {
     Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
-    for (KeyValue kv : edit.getKeyValues()) {
+    for (Cell kv : edit.getCells()) {
       if (kv instanceof IndexedKeyValue) {
         IndexedKeyValue ikv = (IndexedKeyValue) kv;
         indexUpdates.add(new Pair<Mutation, byte[]>(ikv.getMutation(), ikv.getIndexTable()));
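
Same migration as elsewhere in this commit: WALEdit.getKeyValues() is gone in HBase 1.x in favor of getCells(). The instanceof filter keeps working because IndexedKeyValue extends KeyValue, and KeyValue implements Cell, so the marker entries carrying index mutations still surface through the Cell-typed iteration. A condensed sketch of the extraction above:

    // IndexedKeyValue extends KeyValue (which implements Cell), so the
    // marker KVs carrying index mutations survive the Cell-typed API.
    for (Cell cell : edit.getCells()) {
      if (cell instanceof IndexedKeyValue) {
        IndexedKeyValue ikv = (IndexedKeyValue) cell;
        indexUpdates.add(new Pair<Mutation, byte[]>(ikv.getMutation(), ikv.getIndexTable()));
      }
    }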

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
index 296ff95..146028e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java
@@ -124,6 +124,11 @@ public class IndexLoadBalancer implements LoadBalancer {
     }
 
     @Override
+    public void onConfigurationChange(Configuration conf) {
+        setConf(conf);
+    }
+
+    @Override
     public void setClusterStatus(ClusterStatus st) {
         this.clusterStatus = st;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 89489ec..7ae54ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -24,8 +24,10 @@ import java.util.SortedSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.IndexKeyValueSkipListSet;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
@@ -213,7 +215,7 @@ public class IndexMemStore implements KeyValueStore {
      * @return false if the key is null or if there is no data
      */
     @Override
-    public synchronized boolean seek(KeyValue key) {
+    public synchronized boolean seek(Cell key) {
       if (key == null) {
         close();
         return false;
@@ -221,16 +223,16 @@ public class IndexMemStore implements KeyValueStore {
 
       // kvset and snapshot will never be null.
       // if tailSet can't find anything, SortedSet is empty (not null).
-      kvsetIt = kvsetAtCreation.tailSet(key).iterator();
+      kvsetIt = kvsetAtCreation.tailSet(KeyValueUtil.ensureKeyValue(key)).iterator();
       kvsetItRow = null;
 
-      return seekInSubLists(key);
+      return seekInSubLists();
     }
 
     /**
      * (Re)initialize the iterators after a seek or a reseek.
      */
-    private synchronized boolean seekInSubLists(KeyValue key) {
+    private synchronized boolean seekInSubLists() {
       nextRow = getNext(kvsetIt);
       return nextRow != null;
     }
@@ -241,7 +243,7 @@ public class IndexMemStore implements KeyValueStore {
      * @return true if there is at least one KV to read, false otherwise
      */
     @Override
-    public synchronized boolean reseek(KeyValue key) {
+    public synchronized boolean reseek(Cell key) {
       /*
        * See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation. This
        * code is executed concurrently with flush and puts, without locks. Two points must be known
@@ -252,8 +254,9 @@ public class IndexMemStore implements KeyValueStore {
        * we iterated to and restore the reseeked set to at least that point.
        */
 
-      kvsetIt = kvsetAtCreation.tailSet(getHighest(key, kvsetItRow)).iterator();
-      return seekInSubLists(key);
+      KeyValue kv = KeyValueUtil.ensureKeyValue(key);
+      kvsetIt = kvsetAtCreation.tailSet(getHighest(kv, kvsetItRow)).iterator();
+      return seekInSubLists();
     }
 
     /*
@@ -272,18 +275,18 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public synchronized KeyValue peek() {
+    public synchronized Cell peek() {
       // DebugPrint.println(" MS@" + hashCode() + " peek = " + getLowest());
       return nextRow;
     }
 
     @Override
-    public synchronized KeyValue next() {
+    public synchronized Cell next() {
       if (nextRow == null) {
         return null;
       }
 
-      final KeyValue ret = nextRow;
+      final Cell ret = nextRow;
 
       // Advance the iterators
       nextRow = getNext(kvsetIt);
@@ -314,7 +317,7 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public boolean backwardSeek(KeyValue arg0) throws IOException {
+    public boolean backwardSeek(Cell arg0) throws IOException {
         throw new UnsupportedOperationException();
     }
 
@@ -324,7 +327,7 @@ public class IndexMemStore implements KeyValueStore {
     }
 
     @Override
-    public boolean seekToPreviousRow(KeyValue arg0) throws IOException {
+    public boolean seekToPreviousRow(Cell arg0) throws IOException {
         throw new UnsupportedOperationException();
     }
   }
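
The scanner changes above follow one mechanical rule: the KeyValueScanner interface now traffics in Cell, while the backing skip-list set still stores KeyValue, so incoming cells are normalized at the boundary. A minimal sketch of the bridging shim, assuming the HBase 1.x KeyValueUtil API used in the diff:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    // ensureKeyValue() returns its argument unchanged when it already is a
    // KeyValue and copies it into one otherwise, bridging the Cell-based
    // scanner API to KeyValue-keyed structures like kvsetAtCreation.
    static KeyValue toStoreKey(Cell incoming) {
      return KeyValueUtil.ensureKeyValue(incoming);
    }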

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
index 21eb5cf..554b394 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -78,9 +79,9 @@ public class LazyValueGetter implements ValueGetter {
       return null;
     }
     // there is a next value - we only care about the current value, so we can just snag that
-    KeyValue next = scan.next();
+    Cell next = scan.next();
     if (ref.matches(next)) {
-      return new ImmutableBytesPtr(next.getBuffer(), next.getValueOffset(), next.getValueLength());
+      return new ImmutableBytesPtr(next.getValueArray(), next.getValueOffset(), next.getValueLength());
     }
     return null;
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
index 6750be2..658ce91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -169,7 +170,8 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
   /**
    * Get the next batch of primary table values for the given columns
    * @param refs columns to match against
-   * @param state
+   * @param kvs
+   * @param currentRow
    * @return the total length of all values found and the entries to add for the index
    */
   @SuppressWarnings("deprecation")
@@ -187,7 +189,7 @@ private Pair<Integer, List<ColumnEntry>> getNextEntries(List<CoveredColumn> refs
         continue;
       }
       // there is a next value - we only care about the current value, so we can just snag that
-      KeyValue next = kvs.next();
+      Cell next = kvs.next();
       if (ref.matchesFamily(next.getFamily()) && ref.matchesQualifier(next.getQualifier())) {
         byte[] v = next.getValue();
         totalValueLength += v.length;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
index de21d56..03ff760 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
@@ -205,8 +205,8 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
     @SuppressWarnings("deprecation")
     @Override
     public KeyValue getHint(KeyValue kv) {
-      return KeyValue.createLastOnRow(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
-        kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getBuffer(),
+      return KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
+        kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(),
         kv.getQualifierOffset(), kv.getQualifierLength());
     }
   }
@@ -259,7 +259,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
       if (deleteColumn == null) {
         return false;
       }
-      if (CellUtil.matchingFamily(deleteColumn, next) && deleteColumn.matchingQualifier(next)) {
+      if (CellUtil.matchingFamily(deleteColumn, next) && CellUtil.matchingQualifier(deleteColumn, next)) {
         // falls within the timestamp range
         if (deleteColumn.getTimestamp() >= next.getTimestamp()) {
           return true;
@@ -280,7 +280,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
       // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
       // one.
       if (pointDelete != null && CellUtil.matchingFamily(pointDelete, next)
-          && pointDelete.matchingQualifier(next)) {
+          && CellUtil.matchingQualifier(pointDelete, next)) {
         if (pointDelete.getTimestamp() == next.getTimestamp()) {
           return true;
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
index 4ea7295..ddb5850 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.hbase.index.covered.update;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -88,10 +89,9 @@ public class ColumnReference implements Comparable<ColumnReference> {
         return this.qualifierPtr;
     }
 
-  @SuppressWarnings("deprecation")
-  public boolean matches(KeyValue kv) {
-    if (matchesFamily(kv.getRowArray(), kv.getFamilyOffset(), kv.getFamilyLength())) {
-      return matchesQualifier(kv.getRowArray(), kv.getQualifierOffset(), kv.getQualifierLength());
+  public boolean matches(Cell kv) {
+    if (matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())) {
+      return matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
     }
     return false;
   }
@@ -175,4 +175,4 @@ public class ColumnReference implements Comparable<ColumnReference> {
   public String toString() {
     return "ColumnReference - " + Bytes.toString(getFamily()) + ":" + Bytes.toString(getQualifier());
   }
-}
\ No newline at end of file
+}
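
Worth flagging: this hunk is a bug fix as well as a type change. The old matches(KeyValue) read the family bytes out of kv.getRowArray(), which only happened to work because a KeyValue backs row, family, and qualifier with a single contiguous buffer; the Cell version reads each component from its own accessor and is also correct for non-contiguous Cell implementations. An equivalent check via HBase's helper, shown for illustration (the patch keeps its own matchesFamily/matchesQualifier to reuse cached bytes):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    // Illustrative equivalent of ColumnReference.matches(Cell) using CellUtil.
    static boolean matches(Cell kv, byte[] family, byte[] qualifier) {
      return CellUtil.matchingFamily(kv, family)
          && CellUtil.matchingQualifier(kv, qualifier);
    }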

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
index 8e0b86f..1789b0e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
@@ -20,8 +20,10 @@ package org.apache.phoenix.hbase.index.ipc;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
@@ -43,24 +45,16 @@ public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
             "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
 
     @Override
-    public RpcScheduler create(Configuration conf, RegionServerServices services) {
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
         // create the delegate scheduler
         RpcScheduler delegate;
         try {
             // happens in <=0.98.4 where the scheduler factory is not visible
-            delegate = new SimpleRpcSchedulerFactory().create(conf, services);
+            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
         } catch (IllegalAccessError e) {
             LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
             throw e;
         }
-        try {
-            // make sure we are on a version that phoenix can support
-            Class.forName("org.apache.hadoop.hbase.ipc.RpcExecutor");
-        } catch (ClassNotFoundException e) {
-            LOG.error(VERSION_TOO_OLD_FOR_INDEX_RPC
-                    + " Instead, using falling back to Simple RPC scheduling.");
-            return delegate;
-        }
 
         int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT);
         int minPriority = getMinPriority(conf);
@@ -85,6 +79,11 @@ public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
         return scheduler;
     }
 
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+
     public static int getMinPriority(Configuration conf) {
         return conf.getInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY);
     }
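
For completeness, a factory like this only takes effect if the region server is pointed at it. A hedged wiring sketch follows; the scheduler-factory key is the standard HBase region-server knob and the handler-count key is the Phoenix QueryServices attribute read in create() above, but verify both against your versions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class IndexRpcWiringSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Route RpcScheduler construction through the Phoenix factory so
        // index RPCs get a dedicated handler pool and priority band.
        conf.set("hbase.region.server.rpc.scheduler.factory.class",
            "org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory");
        // Size of the dedicated index handler pool (INDEX_HANDLER_COUNT_ATTRIB).
        conf.setInt("phoenix.regionserver.index.handler.count", 30);
      }
    }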

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index 1f16bef..bdf7126 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.hbase.index.scanner;
 import java.io.IOException;
 import java.util.SortedSet;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
@@ -49,7 +50,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public KeyValue peek() {
+    public Cell peek() {
         return delegate.peek();
     }
 
@@ -61,13 +62,13 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
      *         filters.
      */
     @Override
-    public KeyValue next() throws IOException {
+    public Cell next() throws IOException {
         seekToNextUnfilteredKeyValue();
         return delegate.next();
     }
 
     @Override
-    public boolean seek(KeyValue key) throws IOException {
+    public boolean seek(Cell key) throws IOException {
         if (filter.filterAllRemaining()) { return false; }
         // see if we can seek to the next key
         if (!delegate.seek(key)) { return false; }
@@ -78,7 +79,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     @SuppressWarnings("deprecation")
     private boolean seekToNextUnfilteredKeyValue() throws IOException {
         while (true) {
-            KeyValue peeked = delegate.peek();
+            Cell peeked = delegate.peek();
             // no more key values, so we are done
             if (peeked == null) { return false; }
 
@@ -103,13 +104,13 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean reseek(KeyValue key) throws IOException {
+    public boolean reseek(Cell key) throws IOException {
         this.delegate.reseek(key);
         return this.seekToNextUnfilteredKeyValue();
     }
 
     @Override
-    public boolean requestSeek(KeyValue kv, boolean forward, boolean useBloom) throws IOException {
+    public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
         return this.reseek(kv);
     }
 
@@ -145,7 +146,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean backwardSeek(KeyValue arg0) throws IOException {
+    public boolean backwardSeek(Cell arg0) throws IOException {
         return this.delegate.backwardSeek(arg0);
     }
 
@@ -155,7 +156,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     }
 
     @Override
-    public boolean seekToPreviousRow(KeyValue arg0) throws IOException {
+    public boolean seekToPreviousRow(Cell arg0) throws IOException {
         return this.delegate.seekToPreviousRow(arg0);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
index 868e892..43ddc45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.hbase.index.scanner;
 import java.io.Closeable;
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 
 /**
@@ -33,7 +34,7 @@ public interface Scanner extends Closeable {
    * @return the next keyvalue in the scanner or <tt>null</tt> if there is no next {@link KeyValue}
    * @throws IOException if there is an underlying error reading the data
    */
-  public KeyValue next() throws IOException;
+  public Cell next() throws IOException;
 
   /**
    * Seek to immediately before the given {@link KeyValue}. If that exact {@link KeyValue} is
@@ -51,5 +52,5 @@ public interface Scanner extends Closeable {
    * @return the next {@link KeyValue} or <tt>null</tt> if there are no more values in <tt>this</tt>
    * @throws IOException if there is an error reading the underlying data.
    */
-  public KeyValue peek() throws IOException;
+  public Cell peek() throws IOException;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 575779a..ff33ec2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -23,7 +23,9 @@ import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -110,7 +112,7 @@ public class ScannerBuilder {
     // create a scanner and wrap it as an iterator, meaning you can only go forward
     final FilteredKeyValueScanner kvScanner = new FilteredKeyValueScanner(filters, memstore);
     // seek the scanner to initialize it
-    KeyValue start = KeyValue.createFirstOnRow(update.getRow());
+    KeyValue start = KeyValueUtil.createFirstOnRow(update.getRow());
     try {
       if (!kvScanner.seek(start)) {
         return new EmptyScanner();
@@ -125,7 +127,7 @@ public class ScannerBuilder {
     return new Scanner() {
 
       @Override
-      public KeyValue next() {
+      public Cell next() {
         try {
           return kvScanner.next();
         } catch (IOException e) {
@@ -137,7 +139,7 @@ public class ScannerBuilder {
       public boolean seek(KeyValue next) throws IOException {
         // check to see if the next kv is after the current key, in which case we can use reseek,
         // which will be more efficient
-        KeyValue peek = kvScanner.peek();
+        Cell peek = kvScanner.peek();
         // there is another value and it's before the requested one - we can do a reseek!
         if (peek != null) {
           int compare = KeyValue.COMPARATOR.compare(peek, next);
@@ -152,7 +154,7 @@ public class ScannerBuilder {
       }
 
       @Override
-      public KeyValue peek() throws IOException {
+      public Cell peek() throws IOException {
         return kvScanner.peek();
       }
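
One more instance of the factory-method consolidation: KeyValue.createFirstOnRow(...) moved to KeyValueUtil for HBase 1.x with unchanged semantics. The result is a synthetic KeyValue that sorts before every real cell of its row, which is why it makes a natural initial seek key here. Sketch, assuming the 1.x API:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    // A first-on-row key compares lower than any actual cell in the row,
    // so seeking to it positions the scanner at the row's first KeyValue.
    KeyValue start = KeyValueUtil.createFirstOnRow(Bytes.toBytes("row1"));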
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index 0270de5..b04cf0a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -116,23 +116,6 @@ public class IndexedKeyValue extends KeyValue {
         return COLUMN_QUALIFIER.length;
     }
 
-    /**
-     * This is a KeyValue that shouldn't actually be replayed/replicated, so we always mark it as 
-     * an {@link WALEdit#METAFAMILY} so it isn't replayed/replicated via the normal replay mechanism
-     */
-    @Override
-    public boolean matchingFamily(final byte[] family) {
-        return Bytes.equals(family, WALEdit.METAFAMILY);
-    }
-    
-    /**
-     * Not a real KeyValue
-     */
-    @Override
-    public boolean matchingRow(final byte [] row) {
-        return false;
-    }
-
     @Override
     public String toString() {
         return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + mutation;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 630c8f5..732dd8b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -92,8 +92,8 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.TraceScope;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Strings;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
index 1b9e31a..e92dd6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
@@ -22,11 +22,11 @@ import org.apache.hadoop.metrics2.*;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.metrics.Metrics;
-import org.cloudera.htrace.HTraceConfiguration;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.TimelineAnnotation;
-import org.cloudera.htrace.impl.MilliSpan;
+import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.TimelineAnnotation;
+import org.apache.htrace.impl.MilliSpan;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -153,11 +153,6 @@ public class TraceMetricSource implements SpanReceiver, MetricsSource {
     // noop
   }
 
-  @Override
-  public void configure(HTraceConfiguration conf) {
-    // noop
-  }
-
   private static class Metric {
 
     List<Pair<MetricsInfo, Long>> counters = new ArrayList<Pair<MetricsInfo, Long>>();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index f3fc81d..ccb9064 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -34,7 +34,7 @@ import org.apache.phoenix.metrics.MetricInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.LogUtil;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 
 import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
index bee5a1c..4808f25 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingIterator.java
@@ -22,7 +22,7 @@ import java.sql.SQLException;
 import org.apache.phoenix.iterate.DelegateResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.cloudera.htrace.TraceScope;
+import org.apache.htrace.TraceScope;
 
 /**
  * A simple iterator that closes the trace scope when the iterator is closed.


[31/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bd974e7b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bd974e7b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bd974e7b

Branch: refs/heads/5.x-HBase-1.1
Commit: bd974e7b71e9fed74697b8ea86d887dddfb6daee
Parents: 260fe5c
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:55:23 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bd974e7b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index d39b01d..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.NEXT_ROW;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP) || code.equals(ReturnCode.NEXT_ROW)) {
-        done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bd974e7b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 808e6bc..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.NEXT_ROW, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.NEXT_ROW, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}


[12/31] phoenix git commit: PHOENIX-1756 Add Month() and Second() built-in functions (Alicia Ying Shu)

Posted by ap...@apache.org.
PHOENIX-1756 Add Month() and Second() built-in functions (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6cb6a376
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6cb6a376
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6cb6a376

Branch: refs/heads/4.x-HBase-1.x
Commit: 6cb6a3766ca3b37ea6d410979d97d6daf7bdd10a
Parents: a8b27e3
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Mar 26 00:40:50 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Mar 26 00:40:50 2015 +0530

----------------------------------------------------------------------
 .../end2end/YearMonthSecondFunctionIT.java      | 48 ++++++++++-
 .../phoenix/expression/ExpressionType.java      |  8 +-
 .../expression/function/MonthFunction.java      | 83 ++++++++++++++++++++
 .../expression/function/SecondFunction.java     | 81 +++++++++++++++++++
 4 files changed, 217 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6cb6a376/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
index d19314a..da745fe 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
@@ -107,7 +107,7 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
                         "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
         conn.createStatement().execute(ddl);
         String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
-                "TO_DATE('2010-03-01 00:00:00'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
         conn.createStatement().execute(dml);
         conn.commit();
 
@@ -122,4 +122,50 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         assertEquals(2015, rs.getInt(6));
         assertFalse(rs.next());
     }
+
+    @Test
+    public void testMonthFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MONTH(timestamps), MONTH(times), MONTH(unsignedDates), MONTH(unsignedTimestamps), " +
+                "MONTH(unsignedTimes) FROM T1 where MONTH(dates) = 3");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(4, rs.getInt(2));
+        assertEquals(5, rs.getInt(3));
+        assertEquals(6, rs.getInt(4));
+        assertEquals(7, rs.getInt(5));
+        assertEquals(12, rs.getInt(6));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testSecondFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, SECOND(dates), SECOND(times), SECOND(unsignedDates), SECOND(unsignedTimestamps), " +
+                "SECOND(unsignedTimes) FROM T1 where SECOND(timestamps)=20");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(10, rs.getInt(2));
+        assertEquals(30, rs.getInt(3));
+        assertEquals(40, rs.getInt(4));
+        assertEquals(0, rs.getInt(5));
+        assertEquals(50, rs.getInt(6));
+        assertFalse(rs.next());
+    }
 }
\ No newline at end of file

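A minimal JDBC sketch of how the new MONTH() and SECOND() built-ins are driven
end to end, assuming a local Phoenix cluster at the hypothetical URL
jdbc:phoenix:localhost and the T1 table created by the DDL above. The explicit
'yyyy-MM-dd HH:mm:ss:SSS' pattern handed to TO_DATE is what lets a millisecond
component survive the upsert, presumably why the test DML above was changed.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class MonthSecondSketch {
        public static void main(String[] args) throws Exception {
            // The URL is an assumption; point it at a real cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:00:10'), " +
                    "TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'), " +
                    "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), " +
                    "TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))");
                conn.commit();
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT MONTH(dates), SECOND(times) FROM T1 WHERE k1 = 2");
                while (rs.next()) {
                    System.out.println(rs.getInt(1) + ", " + rs.getInt(2)); // prints 3, 30
                }
            }
        }
    }
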
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6cb6a376/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index c871bc5..3f4fea7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -50,6 +50,7 @@ import org.apache.phoenix.expression.function.LpadFunction;
 import org.apache.phoenix.expression.function.MD5Function;
 import org.apache.phoenix.expression.function.MaxAggregateFunction;
 import org.apache.phoenix.expression.function.MinAggregateFunction;
+import org.apache.phoenix.expression.function.MonthFunction;
 import org.apache.phoenix.expression.function.NthValueFunction;
 import org.apache.phoenix.expression.function.PercentRankAggregateFunction;
 import org.apache.phoenix.expression.function.PercentileContAggregateFunction;
@@ -67,6 +68,7 @@ import org.apache.phoenix.expression.function.RoundTimestampExpression;
 import org.apache.phoenix.expression.function.SQLIndexTypeFunction;
 import org.apache.phoenix.expression.function.SQLTableTypeFunction;
 import org.apache.phoenix.expression.function.SQLViewTypeFunction;
+import org.apache.phoenix.expression.function.SecondFunction;
 import org.apache.phoenix.expression.function.SignFunction;
 import org.apache.phoenix.expression.function.SqlTypeNameFunction;
 import org.apache.phoenix.expression.function.StddevPopFunction;
@@ -194,9 +196,11 @@ public enum ExpressionType {
     ToTimeFunction(ToTimeFunction.class),
     ToTimestampFunction(ToTimestampFunction.class),
     SignFunction(SignFunction.class),
-    YearFunction(YearFunction.class)
+    YearFunction(YearFunction.class),
+    MonthFunction(MonthFunction.class),
+    SecondFunction(SecondFunction.class)
     ;
-    
+
     ExpressionType(Class<? extends Expression> clazz) {
         this.clazz = clazz;
     }

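A hedged note on the placement above: Phoenix serializes built-in expressions
by this enum's ordinal, so MonthFunction and SecondFunction are appended after
YearFunction rather than inserted alphabetically; reordering existing entries
would change the ordinals that previously written byte streams encode. A small
sketch of the round trip, using only standard enum machinery:

    import org.apache.phoenix.expression.ExpressionType;

    public class OrdinalRoundTrip {
        public static void main(String[] args) {
            // The ordinal written during serialization must map back to the
            // same constant on the reading side.
            int ordinal = ExpressionType.MonthFunction.ordinal();
            ExpressionType roundTripped = ExpressionType.values()[ordinal];
            System.out.println(roundTripped); // MonthFunction
        }
    }
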
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6cb6a376/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java
new file mode 100644
index 0000000..5ad6c34
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.joda.time.DateTime;
+
+/**
+ * 
+ * Implementation of the MONTH() built-in function. Input: Date/Timestamp/Time.
+ * Returns an integer from 1 to 12 representing the month component of the date.
+ * 
+ */
+@BuiltInFunction(name=MonthFunction.NAME, 
+args={@Argument(allowedTypes={PTimestamp.class})})
+public class MonthFunction extends ScalarFunction {
+    public static final String NAME = "MONTH";
+
+    public MonthFunction() {
+    }
+
+    public MonthFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // a zero-length value means SQL NULL
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        DateTime dt = new DateTime(dateTime);
+        int month = dt.getMonthOfYear();
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(month, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}

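A sketch of the core of MonthFunction.evaluate() in isolation, assuming only
Joda-Time on the classpath: the child expression's bytes decode to epoch
milliseconds, and DateTime.getMonthOfYear() yields the 1..12 value that is
re-encoded as a Phoenix INTEGER.

    import org.joda.time.DateTime;

    public class MonthSketch {
        public static void main(String[] args) {
            long epochMillis = 1267401600000L; // 2010-03-01 00:00:00 UTC
            // Note DateTime(long) applies the JVM's default time zone, so the
            // result can shift for instants near a month boundary.
            int month = new DateTime(epochMillis).getMonthOfYear();
            System.out.println(month); // 3 in UTC
        }
    }
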
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6cb6a376/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java
new file mode 100644
index 0000000..5f39786
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+
+/**
+ * 
+ * Implementation of the SECOND() built-in function. Input: Date/Timestamp/Time.
+ * Returns an integer from 0 to 59 representing the second component of the time.
+ * 
+ */
+@BuiltInFunction(name=SecondFunction.NAME, 
+args={@Argument(allowedTypes={PTimestamp.class})})
+public class SecondFunction extends ScalarFunction {
+    public static final String NAME = "SECOND";
+
+    public SecondFunction() {
+    }
+
+    public SecondFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // a zero-length value means SQL NULL
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        int sec = (int)((dateTime/1000) % 60);
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(sec, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}

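Unlike MonthFunction, SecondFunction needs no calendar at all: the
seconds-of-minute component falls out of integer arithmetic on the epoch
milliseconds, independent of time zone so long as zone offsets are whole
minutes (true for modern zones). A sketch of that arithmetic:

    public class SecondSketch {
        public static void main(String[] args) {
            long epochMillis = 1078099220000L; // hypothetical instant ending in :20
            // Truncate to whole seconds, then reduce modulo 60.
            int sec = (int) ((epochMillis / 1000) % 60);
            System.out.println(sec); // 20
        }
    }
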

[29/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e9623da1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e9623da1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e9623da1

Branch: refs/heads/4.x-HBase-1.0
Commit: e9623da1747f00158a2a291d23ba989361f44162
Parents: deb4786
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:54:27 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9623da1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9623da1/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}


[02/31] phoenix git commit: PHOENIX-1642 Make Phoenix Master Branch point to HBase 1.0.0

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
index cee3b95..8bd918e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.trace;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 
 /**
  * Utilities for tracing

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
index 3799fdb..b4f70b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
@@ -21,8 +21,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.TimelineAnnotation;
+import org.apache.htrace.Span;
+import org.apache.htrace.TimelineAnnotation;
+import org.apache.phoenix.util.StringUtil;
 
 /**
  * Fake {@link Span} that doesn't save any state, in place of <tt>null</tt> return values, to avoid
@@ -109,4 +110,9 @@ public class NullSpan implements Span {
   public String getProcessId() {
     return null;
   }
+
+  @Override
+  public String toJson() {
+    return StringUtil.EMPTY_STRING;
+  }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 7cd55e8..c9add01 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.trace.util;
 
 import static org.apache.phoenix.util.StringUtil.toBytes;
 
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.Callable;
@@ -28,20 +29,22 @@ import javax.annotation.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.HTraceConfiguration;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.call.CallWrapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.TraceStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceMetricSource;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.Tracer;
-import org.cloudera.htrace.impl.ProbabilitySampler;
-import org.cloudera.htrace.wrappers.TraceCallable;
-import org.cloudera.htrace.wrappers.TraceRunnable;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.apache.htrace.Tracer;
+import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.wrappers.TraceCallable;
+import org.apache.htrace.wrappers.TraceRunnable;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
@@ -58,10 +61,10 @@ public class Tracing {
     // Constants for tracing across the wire
     public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid";
     public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid";
-    
+
     // Constants for passing into the metrics system
     private static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance";
-    
+
     /**
      * Manage the types of frequencies that we support. By default, we never turn on tracing.
      */
@@ -110,11 +113,12 @@ public class Tracing {
     private static Function<ConfigurationAdapter, Sampler<?>> CREATE_PROBABILITY =
             new Function<ConfigurationAdapter, Sampler<?>>() {
                 @Override
-                public Sampler<?> apply(ConfigurationAdapter conn) {
+                public Sampler<?> apply(ConfigurationAdapter conf) {
                     // get the connection properties for the probability information
-                    String probThresholdStr = conn.get(QueryServices.TRACING_PROBABILITY_THRESHOLD_ATTRIB, null);
-                    double threshold = probThresholdStr == null ? QueryServicesOptions.DEFAULT_TRACING_PROBABILITY_THRESHOLD : Double.parseDouble(probThresholdStr);
-                    return new ProbabilitySampler(threshold);
+                    Map<String, String> items = new HashMap<String, String>();
+                    items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY,
+                            conf.get(QueryServices.TRACING_PROBABILITY_THRESHOLD_ATTRIB, Double.toString(QueryServicesOptions.DEFAULT_TRACING_PROBABILITY_THRESHOLD)));
+                    return new ProbabilitySampler(HTraceConfiguration.fromMap(items));
                 }
             };
 
@@ -130,6 +134,19 @@ public class Tracing {
                 conf));
     }
 
+    public static Sampler<?> getConfiguredSampler(TraceStatement traceStatement) {
+      double samplingRate = traceStatement.getSamplingRate();
+      if (samplingRate >= 1.0) {
+          return Sampler.ALWAYS;
+      } else if (samplingRate < 1.0 && samplingRate > 0.0) {
+          Map<String, String> items = new HashMap<String, String>();
+          items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate));
+          return new ProbabilitySampler(HTraceConfiguration.fromMap(items));
+      } else {
+          return Sampler.NEVER;
+      }
+    }
+
     private static Sampler<?> getSampler(String traceLevel, ConfigurationAdapter conf) {
         return Frequency.getSampler(traceLevel).builder.apply(conf);
     }
@@ -202,13 +219,13 @@ public class Tracing {
     public static CallWrapper withTracing(PhoenixConnection conn, String desc) {
         return new TracingWrapper(conn, desc);
     }
-    
+
     private static void addCustomAnnotationsToSpan(@Nullable Span span, @NotNull PhoenixConnection conn) {
         Preconditions.checkNotNull(conn);
-        
+
         if (span == null) {
         	return;
-        } 
+        }
 		Map<String, String> annotations = conn.getCustomTracingAnnotations();
 		// copy over the annotations as bytes
 		for (Map.Entry<String, String> annotation : annotations.entrySet()) {

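A hedged sketch of the htrace 3.x sampler construction adopted above: the
double-valued ProbabilitySampler constructor from the org.cloudera.htrace 2.x
line is gone, so the fraction travels through an HTraceConfiguration keyed by
SAMPLER_FRACTION_CONF_KEY, exactly as getConfiguredSampler() now does.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.htrace.HTraceConfiguration;
    import org.apache.htrace.Sampler;
    import org.apache.htrace.impl.ProbabilitySampler;

    public class SamplerSketch {
        public static void main(String[] args) {
            // Sample roughly 5% of traces; the rate is an arbitrary example.
            Map<String, String> items = new HashMap<String, String>();
            items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(0.05));
            Sampler<?> sampler = new ProbabilitySampler(HTraceConfiguration.fromMap(items));
            System.out.println(sampler.next(null)); // true for ~5% of calls
        }
    }
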
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index c147f91..0ab9791 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -569,6 +569,10 @@ public class IndexUtil {
                     return cell.getMvccVersion();
                 }
 
+                @Override public long getSequenceId() {
+                    return cell.getSequenceId();
+                }
+
                 @Override
                 public byte[] getValueArray() {
                     return cell.getValueArray();
@@ -595,7 +599,7 @@ public class IndexUtil {
                 }
 
                 @Override
-                public short getTagsLength() {
+                public int getTagsLength() {
                     return cell.getTagsLength();
                 }
 
@@ -618,12 +622,6 @@ public class IndexUtil {
                 public byte[] getRow() {
                     return cell.getRow();
                 }
-
-                @Override
-                @Deprecated
-                public int getTagsLengthUnsigned() {
-                    return cell.getTagsLengthUnsigned();
-                }
             };
             // Wrap cell in cell that offsets row key
             result.set(i, newCell);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index ec18d9b..8bd8c11 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.ipc;
 
 import static org.junit.Assert.assertEquals;
 
+import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
@@ -37,6 +38,7 @@ import org.mockito.Mockito;
 public class PhoenixIndexRpcSchedulerTest {
 
     private static final Configuration conf = HBaseConfiguration.create();
+    private static final InetSocketAddress isa = new InetSocketAddress("localhost", 0);
 
     @Test
     public void testIndexPriorityWritesToIndexHandler() throws Exception {
@@ -86,7 +88,7 @@ public class PhoenixIndexRpcSchedulerTest {
     private void dispatchCallWithPriority(RpcScheduler scheduler, int priority) throws Exception {
         CallRunner task = Mockito.mock(CallRunner.class);
         RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build();
-        RpcServer server = new RpcServer(null, "test-rpcserver", null, null, conf, scheduler);
+        RpcServer server = new RpcServer(null, "test-rpcserver", null, isa, conf, scheduler);
         RpcServer.Call call =
                 server.new Call(0, null, null, header, null, null, null, null, 10, null);
         Mockito.when(task.getCall()).thenReturn(call);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
index 4918bba..7d08c0d 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
@@ -30,13 +30,13 @@ public class PhoenixIndexRpcSchedulerFactoryTest {
     @Test
     public void ensureInstantiation() throws Exception {
         Configuration conf = new Configuration(false);
-        conf.setClass(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+        conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
             PhoenixIndexRpcSchedulerFactory.class, RpcSchedulerFactory.class);
         // kinda lame that we copy the logic from the regionserver to do this and can't use a static
         // method, but meh
         try {
             Class<?> rpcSchedulerFactoryClass =
-                    conf.getClass(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                    conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
                         SimpleRpcSchedulerFactory.class);
             Object o = rpcSchedulerFactoryClass.newInstance();
             assertTrue(o instanceof PhoenixIndexRpcSchedulerFactory);
@@ -63,7 +63,7 @@ public class PhoenixIndexRpcSchedulerFactoryTest {
         setMinMax(conf, 0, 4);
         factory.create(conf, null);
 
-        setMinMax(conf, 101, 102);
+        setMinMax(conf, 201, 202);
         factory.create(conf, null);
 
         setMinMax(conf, 102, 101);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index 8c15551..54db5d8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -76,7 +76,7 @@ public class TestLocalTableState {
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
         List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
         KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-        kv.setMvccVersion(0);
+        kv.setSequenceId(0);
         list.add(kv);
         return false;
       }
@@ -115,7 +115,7 @@ public class TestLocalTableState {
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
     final byte[] stored = Bytes.toBytes("stored-value");
     final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-    storedKv.setMvccVersion(2);
+    storedKv.setSequenceId(2);
     Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
       @Override
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
@@ -129,7 +129,7 @@ public class TestLocalTableState {
     LocalTableState table = new LocalTableState(env, state, m);
     // add the kvs from the mutation
     KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
-    kv.setMvccVersion(0);
+    kv.setSequenceId(0);
     table.addPendingUpdates(kv);
 
     // setup the lookup
@@ -161,7 +161,7 @@ public class TestLocalTableState {
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
     final KeyValue storedKv =
         new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
-    storedKv.setMvccVersion(2);
+    storedKv.setSequenceId(2);
     Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
       @Override
       public Boolean answer(InvocationOnMock invocation) throws Throwable {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
index 41e7e65..42e0b03 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.data.IndexMemStore;
 import org.junit.Test;
 
 public class TestIndexMemStore {
@@ -40,9 +39,9 @@ public class TestIndexMemStore {
     IndexMemStore store = new IndexMemStore(IndexMemStore.COMPARATOR);
     long ts = 10;
     KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
-    kv.setMvccVersion(2);
+    kv.setSequenceId(2);
     KeyValue kv2 = new KeyValue(row, family, qual, ts, Type.Put, val2);
-    kv2.setMvccVersion(0);
+    kv2.setSequenceId(0);
     store.add(kv, true);
     // adding the exact same kv shouldn't change anything stored if not overwriting
     store.add(kv2, false);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 375b754..60c11d7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -216,7 +216,7 @@ public class TestWALRecoveryCaching {
         LOG.info("\t== Offline: " + server.getServerName());
         continue;
       }
-      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server.getRSRpcServices());
       LOG.info("\t" + server.getServerName() + " regions: " + regions);
     }
 
@@ -262,9 +262,9 @@ public class TestWALRecoveryCaching {
   }
 
   /**
-   * @param miniHBaseCluster
+   * @param cluster
    * @param server
-   * @param bs
+   * @param table
    * @return
    */
   private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
@@ -281,9 +281,9 @@ public class TestWALRecoveryCaching {
   }
 
   /**
-   * @param miniHBaseCluster
-   * @param indexedTableName
-   * @param tableNameString
+   * @param cluster
+   * @param indexTable
+   * @param primaryTable
    */
   private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] indexTable,
       byte[] primaryTable) throws Exception {
@@ -366,7 +366,7 @@ public class TestWALRecoveryCaching {
     List<HRegion> indexRegions = cluster.getRegions(table);
     Set<ServerName> indexServers = new HashSet<ServerName>();
     for (HRegion region : indexRegions) {
-      indexServers.add(cluster.getServerHoldingRegion(region.getRegionName()));
+      indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionName()));
     }
     return indexServers;
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index a3a02ce..f42dbd7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.junit.After;
@@ -70,13 +70,14 @@ public class TestPerRegionIndexWriteCache {
   @SuppressWarnings("deprecation")
 @Before
   public void setUp() throws Exception {
-      FileSystem newFS = FileSystem.get(TEST_UTIL.getConfiguration());
       Path hbaseRootDir = TEST_UTIL.getDataTestDir();
-      
+      TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
+
+      FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
       HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
-      Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName); 
-      HLog wal = HLogFactory.createHLog(newFS, 
-          hbaseRootDir, "logs", TEST_UTIL.getConfiguration());
+      Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
+      WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
+      WAL wal = walFactory.getWAL(Bytes.toBytes("logs"));
       HTableDescriptor htd = new HTableDescriptor(tableName);
       HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
       htd.addFamily(a);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
index 4622959..eabcaca 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
@@ -24,8 +24,8 @@ import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.impl.MilliSpan;
+import org.apache.htrace.Span;
+import org.apache.htrace.impl.MilliSpan;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index af01f6b..1777aa8 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -71,12 +71,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.cloudera.htrace</groupId>
+      <groupId>org.apache.htrace</groupId>
       <artifactId>htrace-core</artifactId>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
+      <artifactId>netty-all</artifactId>
     </dependency>
     <dependency>
       <groupId>commons-codec</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 92b0ed6..0e656e7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -75,12 +75,12 @@
     <test.output.tofile>true</test.output.tofile>
 
     <!-- Hadoop Versions -->
-    <hbase.version>0.98.9-hadoop2</hbase.version>
-    <hadoop-two.version>2.2.0</hadoop-two.version>
+    <hbase.version>1.0.1-SNAPSHOT</hbase.version>
+    <hadoop-two.version>2.5.1</hadoop-two.version>
 
     <!-- Dependency versions -->
     <commons-cli.version>1.2</commons-cli.version>
-    <hadoop.version>1.0.4</hadoop.version>
+    <hadoop.version>2.5.1</hadoop.version>
     <pig.version>0.12.0</pig.version>
     <jackson.version>1.8.8</jackson.version>
     <antlr.version>3.5</antlr.version>
@@ -99,9 +99,9 @@
     <findbugs.version>1.3.2</findbugs.version>
     <jline.version>2.11</jline.version>
     <snappy.version>0.3</snappy.version>
-    <netty.version>3.6.6.Final</netty.version>
+    <netty.version>4.0.23.Final</netty.version>
     <commons-codec.version>1.7</commons-codec.version>
-    <htrace.version>2.04</htrace.version>
+    <htrace.version>3.1.0-incubating</htrace.version>
     <collections.version>3.2.1</collections.version>
     <jodatime.version>2.3</jodatime.version>
 
@@ -626,13 +626,13 @@
         <version>${slf4j.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.cloudera.htrace</groupId>
+        <groupId>org.apache.htrace</groupId>
         <artifactId>htrace-core</artifactId>
         <version>${htrace.version}</version>
       </dependency>
       <dependency>
         <groupId>io.netty</groupId>
-        <artifactId>netty</artifactId>
+        <artifactId>netty-all</artifactId>
         <version>${netty.version}</version>
       </dependency>
       <dependency>


[24/31] phoenix git commit: PHOENIX-1681 Use the new Region Interface

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index f2d4fb5..5a410ea 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -100,7 +100,7 @@ public class StatisticsWriter implements Closeable {
         statsWriterTable.close();
     }
 
-    public void splitStats(HRegion p, HRegion l, HRegion r, StatisticsCollector tracker, ImmutableBytesPtr cfKey,
+    public void splitStats(Region p, Region l, Region r, StatisticsCollector tracker, ImmutableBytesPtr cfKey,
             List<Mutation> mutations) throws IOException {
         if (tracker == null) { return; }
         boolean useMaxTimeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP;
@@ -108,8 +108,8 @@ public class StatisticsWriter implements Closeable {
             mutations.add(getLastStatsUpdatedTimePut(clientTimeStamp));
         }
         long readTimeStamp = useMaxTimeStamp ? HConstants.LATEST_TIMESTAMP : clientTimeStamp;
-        Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, tableName, cfKey, p.getRegionName(),
-                readTimeStamp);
+        Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, tableName, cfKey,
+            p.getRegionInfo().getRegionName(), readTimeStamp);
         if (result != null && !result.isEmpty()) {
         	Cell cell = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
         	Cell rowCountCell = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES);
@@ -119,13 +119,13 @@ public class StatisticsWriter implements Closeable {
 
                 GuidePostsInfo guidePostsRegionInfo = GuidePostsInfo.deserializeGuidePostsInfo(cell.getValueArray(),
                         cell.getValueOffset(), cell.getValueLength(), rowCount);
-                byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, p.getRegionName());
+                byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, p.getRegionInfo().getRegionName());
                 mutations.add(new Delete(pPrefix, writeTimeStamp));
                 
 	        	long byteSize = 0;
                 Cell byteSizeCell = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
-                int index = Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), r.getStartKey(),
+                int index = Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), r.getRegionInfo().getStartKey(),
                         Bytes.BYTES_COMPARATOR);
                 int size = guidePostsRegionInfo.getGuidePosts().size();
                 int midEndIndex, midStartIndex;
@@ -159,7 +159,7 @@ public class StatisticsWriter implements Closeable {
                             .getGuidePosts().subList(0, midEndIndex), leftRowCount);
                     tracker.clear();
 	                tracker.addGuidePost(cfKey, lguidePosts, leftByteCount, cell.getTimestamp());
-	                addStats(l.getRegionName(), tracker, cfKey, mutations);
+	                addStats(l.getRegionInfo().getRegionName(), tracker, cfKey, mutations);
 	            }
 	            if (midStartIndex < size) {
 	                GuidePostsInfo rguidePosts = new GuidePostsInfo(rightByteCount, guidePostsRegionInfo
@@ -167,7 +167,7 @@ public class StatisticsWriter implements Closeable {
                             rightRowCount);
 	                tracker.clear();
 	                tracker.addGuidePost(cfKey, rguidePosts, rightByteCount, cell.getTimestamp());
-	                addStats(r.getRegionName(), tracker, cfKey, mutations);
+	                addStats(r.getRegionInfo().getRegionName(), tracker, cfKey, mutations);
 	            }
         	}
         }

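The guide-post splitting above leans on Collections.binarySearch over the
sorted guide-post list with the right daughter region's start key; the hit or
insertion point then partitions the posts between the two daughters. A small
sketch of that partitioning on plain strings (an assumption: the exact
midEndIndex/midStartIndex boundary handling in splitStats has subtleties this
sketch glosses over):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class GuidePostSplitSketch {
        public static void main(String[] args) {
            List<String> guidePosts = Arrays.asList("b", "d", "f", "h");
            int index = Collections.binarySearch(guidePosts, "e"); // right daughter's start key
            // binarySearch returns -(insertionPoint) - 1 on a miss.
            int splitAt = index >= 0 ? index + 1 : -(index + 1);
            System.out.println(guidePosts.subList(0, splitAt));                 // [b, d] -> left
            System.out.println(guidePosts.subList(splitAt, guidePosts.size())); // [f, h] -> right
        }
    }
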
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index ca25348..3bf6f23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
@@ -305,47 +305,49 @@ public class IndexUtil {
         });
     }
 
-    public static HRegion getIndexRegion(RegionCoprocessorEnvironment environment)
+    public static Region getIndexRegion(RegionCoprocessorEnvironment environment)
             throws IOException {
-        HRegion dataRegion = environment.getRegion();
+        Region dataRegion = environment.getRegion();
         return getIndexRegion(dataRegion, environment.getRegionServerServices());
     }
 
-    public static HRegion
-            getIndexRegion(HRegion dataRegion, RegionServerCoprocessorEnvironment env)
+    public static Region
+            getIndexRegion(Region dataRegion, RegionServerCoprocessorEnvironment env)
                     throws IOException {
         return getIndexRegion(dataRegion, env.getRegionServerServices());
     }
 
-    public static HRegion getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
-        HRegion indexRegion = env.getRegion();
+    public static Region getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
+        Region indexRegion = env.getRegion();
         return getDataRegion(indexRegion, env.getRegionServerServices());
     }
 
-    public static HRegion
-            getDataRegion(HRegion indexRegion, RegionServerCoprocessorEnvironment env)
+    public static Region
+            getDataRegion(Region indexRegion, RegionServerCoprocessorEnvironment env)
                     throws IOException {
         return getDataRegion(indexRegion, env.getRegionServerServices());
     }
 
-    public static HRegion getIndexRegion(HRegion dataRegion, RegionServerServices rss) throws IOException {
+    public static Region getIndexRegion(Region dataRegion, RegionServerServices rss) throws IOException {
         TableName indexTableName =
                 TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(dataRegion.getTableDesc()
                         .getName()));
-        List<HRegion> onlineRegions = rss.getOnlineRegions(indexTableName);
-        for(HRegion indexRegion : onlineRegions) {
-            if (Bytes.compareTo(dataRegion.getStartKey(), indexRegion.getStartKey()) == 0) {
+        List<Region> onlineRegions = rss.getOnlineRegions(indexTableName);
+        for(Region indexRegion : onlineRegions) {
+            if (Bytes.compareTo(dataRegion.getRegionInfo().getStartKey(),
+                    indexRegion.getRegionInfo().getStartKey()) == 0) {
                 return indexRegion;
             }
         }
         return null;
     }
 
-    public static HRegion getDataRegion(HRegion indexRegion, RegionServerServices rss) throws IOException {
+    public static Region getDataRegion(Region indexRegion, RegionServerServices rss) throws IOException {
         TableName dataTableName = TableName.valueOf(MetaDataUtil.getUserTableName(indexRegion.getTableDesc().getNameAsString()));
-        List<HRegion> onlineRegions = rss.getOnlineRegions(dataTableName);
-        for(HRegion region : onlineRegions) {
-            if (Bytes.compareTo(indexRegion.getStartKey(), region.getStartKey()) == 0) {
+        List<Region> onlineRegions = rss.getOnlineRegions(dataTableName);
+        for(Region region : onlineRegions) {
+            if (Bytes.compareTo(indexRegion.getRegionInfo().getStartKey(),
+                    region.getRegionInfo().getStartKey()) == 0) {
                 return region;
             }
         }
@@ -466,7 +468,7 @@ public class IndexUtil {
     
     public static void wrapResultUsingOffset(final ObserverContext<RegionCoprocessorEnvironment> c,
             List<Cell> result, final int offset, ColumnReference[] dataColumns,
-            TupleProjector tupleProjector, HRegion dataRegion, IndexMaintainer indexMaintainer,
+            TupleProjector tupleProjector, Region dataRegion, IndexMaintainer indexMaintainer,
             byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
         if (tupleProjector != null) {
             // Join back to data table here by issuing a local get projecting

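The mechanical pattern in this commit, applied file by file: coprocessor-facing
code now holds the Region interface instead of the concrete HRegion, and
per-region metadata that HRegion exposed directly is reached through
getRegionInfo(). A hedged sketch of the before/after shape:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionAccessSketch {
        // Hypothetical helper showing the migrated accessors.
        static String describe(Region region) {
            HRegionInfo info = region.getRegionInfo();
            byte[] name = info.getRegionName();     // was region.getRegionName()
            byte[] startKey = info.getStartKey();   // was region.getStartKey()
            return Bytes.toStringBinary(name) + " @ " + Bytes.toStringBinary(startKey);
        }
    }
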
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index e996b23..fa8bd85 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -65,7 +65,7 @@ public class TestLocalTableState {
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
     Mockito.when(env.getConfiguration()).thenReturn(conf);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
@@ -108,7 +108,7 @@ public class TestLocalTableState {
     // setup mocks
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
@@ -154,7 +154,7 @@ public class TestLocalTableState {
     // setup mocks
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index ae577bd..b381e9f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -201,7 +201,7 @@ public class TestWALRecoveryCaching {
 
     // kill the server where the tables live - this should trigger distributed log splitting
     // find the regionserver that matches the passed server
-    List<HRegion> online = new ArrayList<HRegion>();
+    List<Region> online = new ArrayList<Region>();
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
       testTable.getTableName()));
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
@@ -267,9 +267,9 @@ public class TestWALRecoveryCaching {
    * @param table
    * @return
    */
-  private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
+  private List<Region> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
       byte[] table) {
-    List<HRegion> online = Collections.emptyList();
+    List<Region> online = Collections.emptyList();
     for (RegionServerThread rst : cluster.getRegionServerThreads()) {
       // if its the server we are going to kill, get the regions we want to reassign
       if (rst.getRegionServer().getServerName().equals(server)) {
@@ -305,14 +305,14 @@ public class TestWALRecoveryCaching {
       tryIndex = !tryIndex;
       for (ServerName server : servers) {
         // find the regionserver that matches the passed server
-        List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);
+        List<Region> online = getRegionsFromServerForTable(cluster, server, table);
 
         LOG.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
         cluster.waitForRegionServerToStop(server, TIMEOUT);
 
         // force reassign the regions from the table
-        for (HRegion region : online) {
+        for (Region region : online) {
           cluster.getMaster().assignRegion(region.getRegionInfo());
         }
 
@@ -363,10 +363,9 @@ public class TestWALRecoveryCaching {
 
   private Set<ServerName> getServersForTable(MiniHBaseCluster cluster, byte[] table)
       throws Exception {
-    List<HRegion> indexRegions = cluster.getRegions(table);
     Set<ServerName> indexServers = new HashSet<ServerName>();
-    for (HRegion region : indexRegions) {
-      indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionName()));
+    for (Region region : cluster.getRegions(table)) {
+      indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionInfo().getRegionName()));
     }
     return indexServers;
   }

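One accessor change runs through this file: the Region interface exposes the region name only via its HRegionInfo. A minimal sketch of the replacement call chain, assuming a Region in scope:

    import org.apache.hadoop.hbase.regionserver.Region;

    public class RegionNameSketch {
      public static byte[] nameOf(Region region) {
        // replaces the former HRegion#getRegionName() call
        return region.getRegionInfo().getRegionName();
      }
    }
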
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index f42dbd7..c928d49 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -64,8 +64,8 @@ public class TestPerRegionIndexWriteCache {
     p2.add(family, qual, val);
   }
 
-  HRegion r1;
-  HRegion r2;
+  HRegion r1; // FIXME: Uses private type
+  HRegion r2; // FIXME: Uses private type
 
   @SuppressWarnings("deprecation")
  @Before


[30/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/166425db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/166425db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/166425db

Branch: refs/heads/4.x-HBase-1.x
Commit: 166425dba7ed851c45702c7cc4d7fb0e0c32b923
Parents: 0d74cff
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:54:33 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/166425db/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/166425db/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}

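For context, the removed filter restricted a read to a single family under exact equality. With stock HBase, that plain single-family case is ordinarily expressed on the Scan itself rather than with a custom filter; a minimal sketch (family name illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SingleFamilyScanSketch {
      public static Scan singleFamily(String family) {
        // server-side restriction to one column family; no filter needed
        return new Scan().addFamily(Bytes.toBytes(family));
      }
    }
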

[15/31] phoenix git commit: The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave Hacker)

Posted by ap...@apache.org.
The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/24ee2c66
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/24ee2c66
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/24ee2c66

Branch: refs/heads/4.x-HBase-1.x
Commit: 24ee2c66711664eb296d89522ecf8f6a950eb249
Parents: 8ea426c
Author: Thomas <td...@salesforce.com>
Authored: Thu Mar 26 13:11:35 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Thu Mar 26 13:11:35 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ArithmeticQueryIT.java      | 28 ++++++++++++++++++++
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 ++-
 2 files changed, 31 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/24ee2c66/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
index 2df1827..72eb016 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
@@ -957,4 +957,32 @@ public class ArithmeticQueryIT extends BaseHBaseManagedTimeIT {
         assertTrue(rs.next());
         assertEquals(1.333333333, rs.getDouble(1), 0.001);
     }
+
+    @Test
+    public void testFloatingPointUpsert() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE test (id VARCHAR not null primary key, name VARCHAR, lat FLOAT)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO test(id,name,lat) VALUES ('testid', 'testname', -1.00)";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT lat FROM test");
+        assertTrue(rs.next());
+        assertEquals(-1.0f, rs.getFloat(1), 0.001);
+    }
+
+    @Test
+    public void testFloatingPointMultiplicationUpsert() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE test (id VARCHAR not null primary key, name VARCHAR, lat FLOAT)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO test(id,name,lat) VALUES ('testid', 'testname', -1.00 * 1)";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT lat FROM test");
+        assertTrue(rs.next());
+        assertEquals(-1.0f, rs.getFloat(1), 0.001);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/24ee2c66/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 931f327..eb1768c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -577,7 +578,8 @@ public class ParseNodeFactory {
 
     public ParseNode negate(ParseNode child) {
         // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each re-parsing
-        if (LiteralParseNode.ONE.equals(child)) {
+        if (LiteralParseNode.ONE.equals(child) && ((LiteralParseNode)child).getType().isCoercibleTo(
+                PLong.INSTANCE)) {
             return LiteralParseNode.MINUS_ONE;
         }
         return new MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE));

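The fix implies LiteralParseNode equality is value-based, so the floating-point literal 1.0 compared equal to ONE and -1.0 was folded into the integer MINUS_ONE. For readability, the method as it reads after this hunk (surrounding class and imports as in the file):

    public ParseNode negate(ParseNode child) {
        // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each
        // re-parsing. The coercibility guard limits the fold to integral
        // literals, so -1.0 keeps its floating-point type.
        if (LiteralParseNode.ONE.equals(child)
                && ((LiteralParseNode) child).getType().isCoercibleTo(PLong.INSTANCE)) {
            return LiteralParseNode.MINUS_ONE;
        }
        return new MultiplyParseNode(Arrays.asList(child, LiteralParseNode.MINUS_ONE));
    }
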

[18/31] phoenix git commit: PHOENIX-1792 Fix 4.x-HBase-1.x compilation error

Posted by ap...@apache.org.
PHOENIX-1792 Fix 4.x-HBase-1.x compilation error


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/709d8670
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/709d8670
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/709d8670

Branch: refs/heads/4.x-HBase-1.x
Commit: 709d867061dcc4bb7fea6153a9924bf52cfa2d08
Parents: f0c2ed4
Author: Thomas <td...@salesforce.com>
Authored: Fri Mar 27 12:08:10 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Fri Mar 27 12:08:10 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java | 2 +-
 .../phoenix/hbase/index/scanner/FilteredKeyValueScanner.java       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/709d8670/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 654daf0..49e2022 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -387,7 +387,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             // Added for compatibility with HBASE-13109
             // Once we drop support for older versions, add an @Override annotation here
             // and figure out how to get the next indexed key
-            public byte[] getNextIndexedKey() {
+            public Cell getNextIndexedKey() {
                 return null; // indicate that we cannot use the optimization
             }
         };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/709d8670/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index bdf7126..7350eef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -163,7 +163,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     // Added for compatibility with HBASE-13109
     // Once we drop support for older versions, add an @override annotation here
     // and figure out how to get the next indexed key
-    public byte[] getNextIndexedKey() {
+    public Cell getNextIndexedKey() {
         return null; // indicate that we cannot use the optimization
     }
 }
\ No newline at end of file

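Both hunks adjust the same compatibility shim: the method deliberately carries no @Override, so the class still compiles against HBase releases that predate HBASE-13109, while the Cell return type now matches the signature on releases that have it. As a sketch, with the enclosing scanner class assumed:

    import org.apache.hadoop.hbase.Cell;

    // Inside a KeyValueScanner implementation; no @Override on purpose,
    // since older HBase versions do not declare the method.
    public Cell getNextIndexedKey() {
        return null; // signals the next-indexed-key optimization cannot be used
    }
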

[10/31] phoenix git commit: PHOENIX-1744 Allow Integer, UnsignedInt and UnsignedLong to be Cast to TIMESTAMP (Dave Hacker)

Posted by ap...@apache.org.
PHOENIX-1744 Allow Integer, UnsignedInt and UnsignedLong to be Cast to TIMESTAMP (Dave Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/250474de
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/250474de
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/250474de

Branch: refs/heads/4.x-HBase-1.x
Commit: 250474deb381b376d5ed442186470f65b36a8117
Parents: 7de8ee1
Author: David <dh...@salesforce.com>
Authored: Wed Mar 18 13:37:20 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Tue Mar 24 14:00:20 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ToDateFunctionIT.java       | 57 ++++++++++++++++++++
 .../phoenix/schema/types/PUnsignedLong.java     |  5 ++
 2 files changed, 62 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/250474de/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
index bda4ea5..8de39b7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
@@ -33,6 +33,7 @@ import java.sql.Timestamp;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.TypeMismatchException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -176,4 +177,60 @@ public class ToDateFunctionIT extends BaseHBaseManagedTimeIT {
                 callToDateFunction(
                         customTimeZoneConn, "TO_DATE('1970-01-01', 'yyyy-MM-dd')").getTime());
     }
+    
+    @Test
+    public void testTimestampCast() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "GMT+1");
+        Connection customTimeZoneConn = DriverManager.getConnection(getUrl(), props);
+
+        assertEquals(
+            1426188807198L,
+                callToDateFunction(
+                        customTimeZoneConn, "CAST(1426188807198 AS TIMESTAMP)").getTime());
+        
+
+        try {
+            callToDateFunction(
+                    customTimeZoneConn, "CAST(22005 AS TIMESTAMP)");
+            fail();
+        } catch (TypeMismatchException e) {
+
+        }
+    }
+    
+    @Test
+    public void testUnsignedLongToTimestampCast() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "GMT+1");
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        try {
+            conn.prepareStatement(
+                "create table TT("
+                        + "a unsigned_int not null, "
+                        + "b unsigned_int not null, "
+                        + "ts unsigned_long not null "
+                        + "constraint PK primary key (a, b, ts))").execute();
+            conn.commit();
+
+            conn.prepareStatement("upsert into TT values (0, 22120, 1426188807198)").execute();
+            conn.commit();
+            
+            ResultSet rs = conn.prepareStatement("select a, b, ts, CAST(ts AS TIMESTAMP) from TT").executeQuery();
+            assertTrue(rs.next());
+            assertEquals(new Date(1426188807198L), rs.getObject(4));
+            rs.close();
+
+            try {
+                rs = conn.prepareStatement("select a, b, ts, CAST(b AS TIMESTAMP) from TT").executeQuery();
+                fail();
+            } catch (TypeMismatchException e) {
+
+            }
+
+        } finally {
+            conn.close();
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/250474de/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
index 67ae05a..a21ccc3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
@@ -95,6 +95,11 @@ public class PUnsignedLong extends PWholeNumber<Long> {
   }
 
   @Override
+    public boolean isCastableTo(PDataType targetType) {
+      return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE);
+    }
+
+  @Override
   public boolean isCoercibleTo(PDataType targetType) {
     return targetType == this || targetType == PUnsignedDouble.INSTANCE || PLong.INSTANCE
         .isCoercibleTo(targetType);

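End to end, the newly allowed cast looks like the sketch below. The JDBC URL, table name, and values are placeholders mirroring the test above; per the negative test, the same cast on an UNSIGNED_INT column is still rejected with a TypeMismatchException.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class UnsignedLongTimestampCastSketch {
      public static void demo(String jdbcUrl) throws Exception {
        try (Connection conn = DriverManager.getConnection(jdbcUrl);
             Statement stmt = conn.createStatement()) {
          stmt.execute("CREATE TABLE tt (a UNSIGNED_INT NOT NULL, b UNSIGNED_INT NOT NULL, "
              + "ts UNSIGNED_LONG NOT NULL CONSTRAINT pk PRIMARY KEY (a, b, ts))");
          stmt.execute("UPSERT INTO tt VALUES (0, 22120, 1426188807198)");
          conn.commit();
          // Epoch milliseconds stored as UNSIGNED_LONG now cast cleanly to TIMESTAMP.
          ResultSet rs = stmt.executeQuery("SELECT CAST(ts AS TIMESTAMP) FROM tt");
          while (rs.next()) {
            System.out.println(rs.getTimestamp(1));
          }
        }
      }
    }
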

[28/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ff5d8b93
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ff5d8b93
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ff5d8b93

Branch: refs/heads/4.x-HBase-0.98
Commit: ff5d8b930746a8db32d4a0751d223a0297659693
Parents: 1b943db
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:54:18 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff5d8b93/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff5d8b93/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}


[06/31] phoenix git commit: PHOENIX-1746 Pass through guidepost config params on UPDATE STATISTICS call

Posted by ap...@apache.org.
PHOENIX-1746 Pass through guidepost config params on UPDATE STATISTICS call


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/096586e6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/096586e6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/096586e6

Branch: refs/heads/4.x-HBase-1.x
Commit: 096586e65e2779433bf30c30e97f78ae2316365e
Parents: e06ceaf
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Mar 23 10:28:23 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Mar 23 10:29:29 2015 -0700

----------------------------------------------------------------------
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |  6 ++++++
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  4 ++--
 .../coprocessor/BaseScannerRegionObserver.java  |  6 ++++--
 .../UngroupedAggregateRegionObserver.java       |  4 +++-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  9 +++++----
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 ++--
 .../parse/UpdateStatisticsStatement.java        | 11 +++++++++-
 .../apache/phoenix/schema/MetaDataClient.java   | 19 ++++++++++++++----
 .../schema/stats/StatisticsCollector.java       | 21 +++++++++++++++-----
 9 files changed, 63 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
index c34d598..bcb3a0a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
@@ -136,6 +136,12 @@ public class StatsCollectorWithSplitsAndMultiCFIT extends StatsCollectorAbstract
             assertRowCountAndByteCount(info, rowCountArr[i], byteCountArr[i]);
             i++;
         }
+        
+        TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
+        String query = "UPDATE STATISTICS " + STATS_TEST_TABLE_NAME_NEW + " SET \"" + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(2000);
+        conn.createStatement().execute(query);
+        keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
+        assertEquals(6, keyRanges.size());
     }
 
     protected void assertRowCountAndByteCount(GuidePostsInfo info, long rowCount, long byteCount) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 6a2e3b9..0330a39 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -520,8 +520,8 @@ alter_table_node returns [AlterTableStatement ret]
     ;
 
 update_statistics_node returns [UpdateStatisticsStatement ret]
-	:   UPDATE STATISTICS t=from_table_name (s=INDEX | s=ALL | s=COLUMNS)?
-		{ret = factory.updateStatistics(factory.namedTable(null, t), s == null ? StatisticsCollectionScope.getDefault() : StatisticsCollectionScope.valueOf(SchemaUtil.normalizeIdentifier(s.getText())));}
+	:   UPDATE STATISTICS t=from_table_name (s=INDEX | s=ALL | s=COLUMNS)? (SET (p=properties))?
+		{ret = factory.updateStatistics(factory.namedTable(null, t), s == null ? StatisticsCollectionScope.getDefault() : StatisticsCollectionScope.valueOf(SchemaUtil.normalizeIdentifier(s.getText())), p);}
 	;
 
 prop_name returns [String ret]

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index c3988a0..a2269b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
 import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
@@ -53,8 +55,6 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
 
 import com.google.common.collect.ImmutableList;
 
@@ -85,6 +85,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     public static final String EXPECTED_UPPER_REGION_KEY = "_ExpectedUpperRegionKey";
     public static final String REVERSE_SCAN = "_ReverseScan";
     public static final String ANALYZE_TABLE = "_ANALYZETABLE";
+    public static final String GUIDEPOST_WIDTH_BYTES = "_GUIDEPOST_WIDTH_BYTES";
+    public static final String GUIDEPOST_PER_REGION = "_GUIDEPOST_PER_REGION";
     /**
      * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations
      * are used to augment log lines emitted by Phoenix. See https://issues.apache.org/jira/browse/PHOENIX-1198.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index fc37a84..e43e5e5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -162,8 +162,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         long ts = scan.getTimeRange().getMax();
         StatisticsCollector stats = null;
         if(ScanUtil.isAnalyzeTable(scan)) {
+            byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
+            byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
             // Let this throw, as this scan is being done for the sole purpose of collecting stats
-            stats = new StatisticsCollector(c.getEnvironment(), region.getRegionInfo().getTable().getNameAsString(), ts);
+            stats = new StatisticsCollector(c.getEnvironment(), region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
         }
         if (ScanUtil.isLocalIndex(scan)) {
             /*

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 996d243..f802ff4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -34,6 +34,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Pair;
@@ -717,8 +718,8 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
 
     private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement implements
             CompilableStatement {
-        public ExecutableUpdateStatisticsStatement(NamedTableNode table, StatisticsCollectionScope scope) {
-            super(table, scope);
+        public ExecutableUpdateStatisticsStatement(NamedTableNode table, StatisticsCollectionScope scope, Map<String,Object> props) {
+            super(table, scope, props);
         }
 
         @SuppressWarnings("unchecked")
@@ -919,8 +920,8 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         }
 
         @Override
-        public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope) {
-            return new ExecutableUpdateStatisticsStatement(table, scope);
+        public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope, Map<String,Object> props) {
+            return new ExecutableUpdateStatisticsStatement(table, scope, props);
         }
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 82ae821..931f327 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -357,8 +357,8 @@ public class ParseNodeFactory {
         return new DivideParseNode(children);
     }
 
-    public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope) {
-      return new UpdateStatisticsStatement(table, scope);
+    public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope, Map<String,Object> props) {
+      return new UpdateStatisticsStatement(table, scope, props);
     }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
index dff9f06..6f7b736 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java
@@ -21,6 +21,8 @@ import static org.apache.phoenix.schema.stats.StatisticsCollectionScope.ALL;
 import static org.apache.phoenix.schema.stats.StatisticsCollectionScope.COLUMNS;
 import static org.apache.phoenix.schema.stats.StatisticsCollectionScope.INDEX;
 
+import java.util.Map;
+
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 
 import com.sun.istack.NotNull;
@@ -28,9 +30,12 @@ import com.sun.istack.NotNull;
 
 public class UpdateStatisticsStatement extends SingleTableStatement {
     private final StatisticsCollectionScope scope;
-    public UpdateStatisticsStatement(NamedTableNode table, @NotNull StatisticsCollectionScope scope) {
+    private final Map<String,Object> props;
+    
+    public UpdateStatisticsStatement(NamedTableNode table, @NotNull StatisticsCollectionScope scope, Map<String,Object> props) {
         super(table, 0);
         this.scope = scope;
+        this.props = props;
     }
 
     public boolean updateColumns() {
@@ -43,5 +48,9 @@ public class UpdateStatisticsStatement extends SingleTableStatement {
 
     public boolean updateAll() {
         return scope == ALL;
+    }
+
+    public Map<String,Object> getProps() {
+        return props;
     };
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 7688531..2ba0cde 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -153,6 +153,7 @@ import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.stats.PTableStats;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -618,7 +619,7 @@ public class MetaDataClient {
         PTable table = resolver.getTables().get(0).getTable();
         long rowCount = 0;
         if (updateStatisticsStmt.updateColumns()) {
-            rowCount += updateStatisticsInternal(table.getPhysicalName(), table);
+            rowCount += updateStatisticsInternal(table.getPhysicalName(), table, updateStatisticsStmt.getProps());
         }
         if (updateStatisticsStmt.updateIndex()) {
             // TODO: If our table is a VIEW with multiple indexes or a TABLE with local indexes,
@@ -626,7 +627,7 @@ public class MetaDataClient {
             // across all indexes in that case so that we don't re-calculate the same stats
             // multiple times.
             for (PTable index : table.getIndexes()) {
-                rowCount += updateStatisticsInternal(index.getPhysicalName(), index);
+                rowCount += updateStatisticsInternal(index.getPhysicalName(), index, updateStatisticsStmt.getProps());
             }
             // If analyzing the indexes of a multi-tenant table or a table with view indexes
             // then analyze all of those indexes too.
@@ -654,14 +655,14 @@ public class MetaDataClient {
                             return PTableStats.EMPTY_STATS;
                         }
                     };
-                    rowCount += updateStatisticsInternal(name, indexLogicalTable);
+                    rowCount += updateStatisticsInternal(name, indexLogicalTable, updateStatisticsStmt.getProps());
                 }
             }
         }
         return new MutationState((int)rowCount, connection);
     }
 
-    private long updateStatisticsInternal(PName physicalName, PTable logicalTable) throws SQLException {
+    private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps) throws SQLException {
         ReadOnlyProps props = connection.getQueryServices().getProps();
         final long msMinBetweenUpdates = props
                 .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
@@ -691,6 +692,16 @@ public class MetaDataClient {
             Scan scan = plan.getContext().getScan();
             scan.setCacheBlocks(false);
             scan.setAttribute(BaseScannerRegionObserver.ANALYZE_TABLE, PDataType.TRUE_BYTES);
+            if (statsProps != null) {
+                Object gp_width = statsProps.get(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB);
+                if (gp_width != null) {
+                    scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES, PLong.INSTANCE.toBytes(gp_width));
+                }
+                Object gp_per_region = statsProps.get(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB);
+                if (gp_per_region != null) {
+                    scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION, PInteger.INSTANCE.toBytes(gp_per_region));
+                }
+            }
             MutationState mutationState = plan.execute();
             rowCount = mutationState.getUpdateCount();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index ade0fba..d6f25c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -38,6 +38,9 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.TimeKeeper;
 import org.slf4j.Logger;
@@ -65,15 +68,23 @@ public class StatisticsCollector {
     private Pair<Long,GuidePostsInfo> cachedGps = null;
 
     public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp) throws IOException {
-        this(env, tableName, clientTimeStamp, null);
+        this(env, tableName, clientTimeStamp, null, null, null);
+    }
+
+    public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, byte[] gp_width_bytes, byte[] gp_per_region_bytes) throws IOException {
+        this(env, tableName, clientTimeStamp, null, gp_width_bytes, gp_per_region_bytes);
     }
 
     public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, byte[] family) throws IOException {
+        this(env, tableName, clientTimeStamp, family, null, null);
+    }
+
+    public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, byte[] family, byte[] gp_width_bytes, byte[] gp_per_region_bytes) throws IOException {
         Configuration config = env.getConfiguration();
-        int guidepostPerRegion = config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 
-                QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION);
-        long guidepostWidth = config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
-                QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES);
+        int guidepostPerRegion = gp_per_region_bytes == null ? config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 
+                QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION) : PInteger.INSTANCE.getCodec().decodeInt(gp_per_region_bytes, 0, SortOrder.getDefault());
+        long guidepostWidth = gp_width_bytes == null ? config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
+                QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES) : PLong.INSTANCE.getCodec().decodeInt(gp_width_bytes, 0, SortOrder.getDefault());
         this.guidepostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth, env.getRegion().getTableDesc());
         // Get the stats table associated with the current table on which the CP is
         // triggered

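From the client side, the new SET clause reaches the collector through the scan attributes wired above. A minimal sketch of issuing it over JDBC, following the integration test earlier in this commit (URL and table name are placeholders; the quoted key is assumed to be the string value of QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, and needs double quotes because it contains dots):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class UpdateStatisticsPropsSketch {
      public static void run(String jdbcUrl) throws Exception {
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
          // Per-run guidepost width for this UPDATE STATISTICS invocation only.
          conn.createStatement().execute(
              "UPDATE STATISTICS my_table SET \"phoenix.stats.guidepost.width\" = 2000");
        }
      }
    }
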

[25/31] phoenix git commit: PHOENIX-1681 Use the new Region Interface

Posted by ap...@apache.org.
PHOENIX-1681 Use the new Region Interface


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/260fe5ca
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/260fe5ca
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/260fe5ca

Branch: refs/heads/5.x-HBase-1.1
Commit: 260fe5cabd19f5333372798aad572cd73e07aa02
Parents: 41ad918
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Apr 15 11:26:39 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Apr 15 11:26:39 2015 -0700

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  4 +-
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |  4 +-
 .../IndexHalfStoreFileReaderGenerator.java      |  9 +--
 .../regionserver/IndexSplitTransaction.java     | 65 +++++++-------------
 .../hbase/regionserver/LocalIndexMerger.java    | 16 ++---
 .../hbase/regionserver/LocalIndexSplitter.java  | 11 ++--
 .../coprocessor/BaseScannerRegionObserver.java  | 22 +++----
 .../GroupedAggregateRegionObserver.java         | 13 ++--
 .../coprocessor/MetaDataEndpointImpl.java       | 60 +++++++++---------
 .../phoenix/coprocessor/ScanRegionObserver.java | 17 ++---
 .../coprocessor/SequenceRegionObserver.java     | 16 ++---
 .../UngroupedAggregateRegionObserver.java       | 29 ++++-----
 .../hbase/index/covered/data/LocalTable.java    |  5 +-
 .../write/ParallelWriterIndexCommitter.java     |  8 ++-
 .../recovery/PerRegionIndexWriteCache.java      | 10 +--
 .../recovery/StoreFailuresInCachePolicy.java    |  4 +-
 .../TrackingParallelWriterIndexCommitter.java   |  8 ++-
 .../phoenix/index/PhoenixIndexBuilder.java      |  4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java | 14 ++---
 .../schema/stats/StatisticsCollector.java       | 14 ++---
 .../phoenix/schema/stats/StatisticsScanner.java | 16 ++---
 .../phoenix/schema/stats/StatisticsWriter.java  | 16 ++---
 .../java/org/apache/phoenix/util/IndexUtil.java | 38 ++++++------
 .../index/covered/TestLocalTableState.java      |  8 +--
 .../index/write/TestWALRecoveryCaching.java     | 17 +++--
 .../recovery/TestPerRegionIndexWriteCache.java  |  4 +-
 26 files changed, 210 insertions(+), 222 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 3b8ff29..611ba68 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -159,7 +159,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   }
 
   /**
-   * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
+   * Test writing edits into a region, closing it, splitting logs, opening Region again. Verify
    * seqids.
    * @throws Exception on failure
    */
@@ -183,7 +183,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     builder.build(htd);
 
     // create the region + its WAL
-    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
+    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); // FIXME: Uses private type
     region0.close();
     region0.getWAL().close();
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
index d90733f..6b2309e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.util.EnvironmentEdge;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
@@ -312,7 +312,7 @@ public class EndToEndCoveredColumnsIndexBuilderIT {
     HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);
 
     // overwrite the codec so we can verify the current state
-    HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
+    Region region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
     Indexer indexer =
         (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
     CoveredColumnsIndexBuilder builder =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 1284dcf..94d5912 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -76,7 +76,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
             FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
             Reference r, Reader reader) throws IOException {
         TableName tableName = ctx.getEnvironment().getRegion().getTableDesc().getTableName();
-        HRegion region = ctx.getEnvironment().getRegion();
+        Region region = ctx.getEnvironment().getRegion();
         HRegionInfo childRegion = region.getRegionInfo();
         byte[] splitKey = null;
         if (reader == null && r != null) {
@@ -109,7 +109,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                     Pair<HRegionInfo, HRegionInfo> mergeRegions =
                             MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment()
                                     .getRegionServerServices().getConnection(),
-                                region.getRegionName());
+                                region.getRegionInfo().getRegionName());
                     if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
                     byte[] splitRow =
                             CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
@@ -121,8 +121,9 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         childRegion = mergeRegions.getSecond();
                         regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                     }
-                    splitKey = KeyValue.createFirstOnRow(region.getStartKey().length == 0 ?
-                            new byte[region.getEndKey().length] : region.getStartKey()).getKey();
+                    splitKey = KeyValue.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
+                        new byte[region.getRegionInfo().getEndKey().length] :
+                            region.getRegionInfo().getStartKey()).getKey();
                 } else {
                     HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
                     regionStartKeyInHFile =

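The hunk above shows the accessor migration this patch repeats across many files: the new Region interface does not expose boundary keys directly, so callers reach them through HRegionInfo. A minimal before/after sketch, assuming HBase 1.1-era types; the wrapper class is illustrative only:

import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: boundary-key lookups as they read after the patch.
final class RegionKeys {

    static byte[] startKey(Region region) {
        // Formerly region.getStartKey() on HRegion.
        return region.getRegionInfo().getStartKey();
    }

    static byte[] endKey(Region region) {
        // Formerly region.getEndKey() on HRegion.
        return region.getRegionInfo().getEndKey();
    }
}
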
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 3057a14..71bc520 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -65,31 +65,8 @@ import org.apache.zookeeper.data.Stat;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-/**
- * Executes region split as a "transaction".  Call {@link #prepare()} to setup
- * the transaction, {@link #execute(Server, RegionServerServices)} to run the
- * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails.
- *
- * <p>Here is an example of how you would use this class:
- * <pre>
- *  SplitTransaction st = new SplitTransaction(this.conf, parent, midKey)
- *  if (!st.prepare()) return;
- *  try {
- *    st.execute(server, services);
- *  } catch (IOException ioe) {
- *    try {
- *      st.rollback(server, services);
- *      return;
- *    } catch (RuntimeException e) {
- *      myAbortable.abort("Failed split, abort");
- *    }
- *  }
- * </Pre>
- * <p>This class is not thread safe.  Caller needs ensure split is run by
- * one thread only.
- */
 @InterfaceAudience.Private
-public class IndexSplitTransaction extends SplitTransaction {
+public class IndexSplitTransaction extends SplitTransactionImpl { // FIXME: Extends private type
   private static final Log LOG = LogFactory.getLog(IndexSplitTransaction.class);
 
   /*
@@ -154,9 +131,9 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @param r Region to split
    * @param splitrow Row to split around
    */
-  public IndexSplitTransaction(final HRegion r, final byte [] splitrow) {
+  public IndexSplitTransaction(final Region r, final byte [] splitrow) {
     super(r, splitrow);
-    this.parent = r;
+    this.parent = (HRegion)r;
     this.splitrow = splitrow;
   }
 
@@ -217,7 +194,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return Regions created
    */
   @Override
-  /* package */PairOfSameType<HRegion> createDaughters(final Server server,
+  /* package */PairOfSameType<Region> createDaughters(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting split of region " + this.parent);
     if ((server != null && server.isStopped()) ||
@@ -244,14 +221,14 @@ public class IndexSplitTransaction extends SplitTransaction {
         server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
           this.fileSplitTimeout);
 
-    PairOfSameType<HRegion> daughterRegions = stepsBeforePONR(server, services, testing);
+    PairOfSameType<Region> daughterRegions = stepsBeforePONR(server, services, testing);
 
     List<Mutation> metaEntries = new ArrayList<Mutation>();
     if (this.parent.getCoprocessorHost() != null) {
       if (this.parent.getCoprocessorHost().
           preSplitBeforePONR(this.splitrow, metaEntries)) {
         throw new IOException("Coprocessor bypassing region "
-            + this.parent.getRegionNameAsString() + " split.");
+            + this.parent.getRegionInfo().getRegionNameAsString() + " split.");
       }
       try {
         for (Mutation p : metaEntries) {
@@ -303,7 +280,7 @@ public class IndexSplitTransaction extends SplitTransaction {
   }
 
   @Override
-  public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
+  public PairOfSameType<Region> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
     // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
     // have zookeeper so don't do zk stuff if server or zookeeper is null
@@ -313,7 +290,7 @@ public class IndexSplitTransaction extends SplitTransaction {
           parent.getRegionInfo(), server.getServerName(), hri_a, hri_b);
       } catch (KeeperException e) {
         throw new IOException("Failed creating PENDING_SPLIT znode on " +
-          this.parent.getRegionNameAsString(), e);
+          this.parent.getRegionInfo().getRegionNameAsString(), e);
       }
     }
     this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK);
@@ -367,12 +344,12 @@ public class IndexSplitTransaction extends SplitTransaction {
     // stuff in fs that needs cleanup -- a storefile or two.  That's why we
     // add entry to journal BEFORE rather than AFTER the change.
     this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
-    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
+    Region a = this.parent.createDaughterRegionFromSplits(this.hri_a);
 
     // Ditto
     this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
-    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
-    return new PairOfSameType<HRegion>(a, b);
+    Region b = this.parent.createDaughterRegionFromSplits(this.hri_b);
+    return new PairOfSameType<Region>(a, b);
   }
 
   /**
@@ -387,7 +364,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    */
   @Override
   /* package */void openDaughters(final Server server,
-      final RegionServerServices services, HRegion a, HRegion b)
+      final RegionServerServices services, Region a, Region b)
       throws IOException {
     boolean stopped = server != null && server.isStopped();
     boolean stopping = services != null && services.isStopping();
@@ -400,8 +377,8 @@ public class IndexSplitTransaction extends SplitTransaction {
           " because stopping=" + stopping + ", stopped=" + stopped);
     } else {
       // Open daughters in parallel.
-      DaughterOpener aOpener = new DaughterOpener(server, a);
-      DaughterOpener bOpener = new DaughterOpener(server, b);
+      DaughterOpener aOpener = new DaughterOpener(server, (HRegion)a);
+      DaughterOpener bOpener = new DaughterOpener(server, (HRegion)b);
       aOpener.start();
       bOpener.start();
       try {
@@ -444,7 +421,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void transitionZKNode(final Server server,
-      final RegionServerServices services, HRegion a, HRegion b)
+      final RegionServerServices services, Region a, Region b)
       throws IOException {
     // Tell master about split by updating zk.  If we fail, abort.
     if (server != null && server.getZooKeeper() != null) {
@@ -556,7 +533,7 @@ public class IndexSplitTransaction extends SplitTransaction {
         Thread.currentThread().interrupt();
       }
       throw new IOException("Failed getting SPLITTING znode on "
-        + parent.getRegionNameAsString(), e);
+        + parent.getRegionInfo().getRegionNameAsString(), e);
     }
   }
 
@@ -572,10 +549,10 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @see #rollback(Server, RegionServerServices)
    */
   @Override
-  public PairOfSameType<HRegion> execute(final Server server,
+  public PairOfSameType<Region> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
-    PairOfSameType<HRegion> regions = createDaughters(server, services);
+    PairOfSameType<Region> regions = createDaughters(server, services);
     if (this.parent.getCoprocessorHost() != null) {
       this.parent.getCoprocessorHost().preSplitAfterPONR();
     }
@@ -583,8 +560,8 @@ public class IndexSplitTransaction extends SplitTransaction {
   }
 
   @Override
-  public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
-      final RegionServerServices services, PairOfSameType<HRegion> regions)
+  public PairOfSameType<Region> stepsAfterPONR(final Server server,
+      final RegionServerServices services, PairOfSameType<Region> regions)
       throws IOException {
     openDaughters(server, services, regions.getFirst(), regions.getSecond());
     transitionZKNode(server, services, regions.getFirst(), regions.getSecond());
@@ -871,7 +848,7 @@ public class IndexSplitTransaction extends SplitTransaction {
           this.parent.initialize();
         } catch (IOException e) {
           LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
-            this.parent.getRegionNameAsString(), e);
+            this.parent.getRegionInfo().getRegionNameAsString(), e);
           throw new RuntimeException(e);
         }
         break;
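
As the FIXME comments note, the split transaction still needs HRegion internals, so the Region handed in through the coprocessor API is cast back down. A sketch of that pattern in isolation, assuming HBase 1.1 types; the helper class is illustrative, not part of the patch:

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: the Region-to-HRegion downcast used above. It holds on
// a region server today, but couples the caller to a private implementation
// type, which is exactly what the FIXME flags.
final class Regions {
    static HRegion asHRegion(Region region) {
        return (HRegion) region;
    }
}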

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index add9b72..e361343 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -38,12 +38,12 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexMerger.class);
 
-    private RegionMergeTransaction rmt = null;
-    private HRegion mergedRegion = null;
+    private RegionMergeTransactionImpl rmt = null; // FIXME: Use of private type
+    private HRegion mergedRegion = null; // FIXME: Use of private type
 
     @Override
     public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
+            Region regionA, Region regionB, List<Mutation> metaEntries) throws IOException {
         HTableDescriptor tableDesc = regionA.getTableDesc();
         if (SchemaUtil.isSystemTable(tableDesc.getName())) {
             return;
@@ -56,14 +56,14 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
             TableName indexTable =
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
             if (!MetaTableAccessor.tableExists(rs.getConnection(), indexTable)) return;
-            HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
+            Region indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
             if (indexRegionA == null) {
                 LOG.warn("Index region corresponindg to data region " + regionA
                         + " not in the same server. So skipping the merge.");
                 ctx.bypass();
                 return;
             }
-            HRegion indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
+            Region indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
             if (indexRegionB == null) {
                 LOG.warn("Index region corresponindg to region " + regionB
                         + " not in the same server. So skipping the merge.");
@@ -71,7 +71,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                 return;
             }
             try {
-                rmt = new RegionMergeTransaction(indexRegionA, indexRegionB, false);
+                rmt = new RegionMergeTransactionImpl(indexRegionA, indexRegionB, false);
                 if (!rmt.prepare(rss)) {
                     LOG.error("Prepare for the index regions merge [" + indexRegionA + ","
                             + indexRegionB + "] failed. So returning null. ");
@@ -97,7 +97,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     @Override
     public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException {
+            Region regionA, Region regionB, Region mergedRegion) throws IOException {
         if (rmt != null && this.mergedRegion != null) {
             RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
             HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
@@ -107,7 +107,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     @Override
     public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB) throws IOException {
+            Region regionA, Region regionB) throws IOException {
         HRegionServer rs = (HRegionServer) ctx.getEnvironment().getRegionServerServices();
         try {
             if (rmt != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 9af8251..7882e25 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -52,8 +52,8 @@ public class LocalIndexSplitter extends BaseRegionObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexSplitter.class);
 
-    private SplitTransaction st = null;
-    private PairOfSameType<HRegion> daughterRegions = null;
+    private SplitTransactionImpl st = null; // FIXME: Uses private type
+    private PairOfSameType<Region> daughterRegions = null;
     private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
     private static final int SPLIT_TXN_MINIMUM_SUPPORTED_VERSION = VersionUtil
             .encodeVersion("0.98.9");
@@ -74,17 +74,18 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
             if (!MetaTableAccessor.tableExists(rss.getConnection(), indexTable)) return;
 
-            HRegion indexRegion = IndexUtil.getIndexRegion(environment);
+            Region indexRegion = IndexUtil.getIndexRegion(environment);
             if (indexRegion == null) {
                 LOG.warn("Index region corresponindg to data region " + environment.getRegion()
                         + " not in the same server. So skipping the split.");
                 ctx.bypass();
                 return;
             }
+            // FIXME: Uses private type
             try {
                 int encodedVersion = VersionUtil.encodeVersion(environment.getHBaseVersion());
                 if(encodedVersion >= SPLIT_TXN_MINIMUM_SUPPORTED_VERSION) {
-                    st = new SplitTransaction(indexRegion, splitKey);
+                    st = new SplitTransactionImpl(indexRegion, splitKey);
                     st.useZKForAssignment =
                             environment.getConfiguration().getBoolean("hbase.assignment.usezk",
                                 true);
@@ -98,7 +99,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                     ctx.bypass();
                     return;
                 }
-                indexRegion.forceSplit(splitKey);
+                ((HRegion)indexRegion).forceSplit(splitKey);
                 daughterRegions = st.stepsBeforePONR(rss, rss, false);
                 HRegionInfo copyOfParent = new HRegionInfo(indexRegion.getRegionInfo());
                 copyOfParent.setOffline(true);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index fc74968..1cc3412 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -114,12 +114,12 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     }
 
 
-    private static void throwIfScanOutOfRegion(Scan scan, HRegion region) throws DoNotRetryIOException {
+    private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException {
         boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
         byte[] lowerInclusiveScanKey = scan.getStartRow();
         byte[] upperExclusiveScanKey = scan.getStopRow();
-        byte[] lowerInclusiveRegionKey = region.getStartKey();
-        byte[] upperExclusiveRegionKey = region.getEndKey();
+        byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey();
+        byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey();
         boolean isStaleRegionBoundaries;
         if (isLocalIndex) {
             byte[] expectedUpperRegionKey = scan.getAttribute(EXPECTED_UPPER_REGION_KEY);
@@ -201,7 +201,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 }
             }
         } catch (Throwable t) {
-            ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+            ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
             return null; // impossible
         }
     }
@@ -221,7 +221,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
             final RegionScanner s, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
-            final HRegion dataRegion, final IndexMaintainer indexMaintainer,
+            final Region dataRegion, final IndexMaintainer indexMaintainer,
             final byte[][] viewConstants, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
@@ -246,7 +246,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final RegionScanner s, final Set<KeyValueColumnExpression> arrayKVRefs,
             final Expression[] arrayFuncRefs, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
-            final HRegion dataRegion, final IndexMaintainer indexMaintainer,
+            final Region dataRegion, final IndexMaintainer indexMaintainer,
             final byte[][] viewConstants, final KeyValueSchema kvSchema,
             final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
@@ -257,7 +257,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 try {
                     return s.next(results);
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -267,7 +267,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 try {
                     return s.next(result, scannerContext);
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -319,7 +319,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                    // There is a scan attribute set to retrieve the specific array element
                     return next;
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -347,7 +347,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                // There is a scan attribute set to retrieve the specific array element
                 return next;
             } catch (Throwable t) {
-                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                 return false; // impossible
             }
             }
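
Each scanner callback above funnels any Throwable through ServerUtil.throwIOException, tagging it with the region name, which after this patch is reached via getRegionInfo(). A condensed sketch of that pattern, using the ServerUtil call visible in the diff; the wrapper class and method are illustrative:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.phoenix.util.ServerUtil;

// Illustrative only: rethrow scanner failures as IOExceptions that name the
// region, mirroring the next() overrides above.
final class SafeNext {
    static boolean next(RegionScanner s, Region region, List<Cell> results)
            throws IOException {
        try {
            return s.next(results);
        } catch (Throwable t) {
            ServerUtil.throwIOException(
                region.getRegionInfo().getRegionNameAsString(), t);
            return false; // unreachable: throwIOException always throws
        }
    }
}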

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 19a1663..d613688 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
@@ -112,8 +112,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            HRegion region = c.getEnvironment().getRegion();
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            Region region = c.getEnvironment().getRegion();
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
@@ -128,7 +129,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
 
@@ -415,7 +416,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
             }
 
-            HRegion region = c.getEnvironment().getRegion();
+            Region region = c.getEnvironment().getRegion();
             region.startRegionOperation();
             try {
                 synchronized (scanner) {
@@ -495,7 +496,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 // If we're calculating no aggregate functions, we can exit at the
                 // start of a new row. Otherwise, we have to wait until an agg
                 int countOffset = rowAggregators.length == 0 ? 1 : 0;
-                HRegion region = c.getEnvironment().getRegion();
+                Region region = c.getEnvironment().getRegion();
                 region.startRegionOperation();
                 try {
                     synchronized (scanner) {

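The offset computation above, repeated in ScanRegionObserver and UngroupedAggregateRegionObserver below, encodes one rule: local index scans skip the region start key prefix, and the first region, whose start key is empty, uses its end key's length instead. A sketch of that rule factored out, assuming the HBase 1.1 Region interface; the helper class is illustrative:

import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: row-key offset for local index scans, as computed in
// the observers above.
final class LocalIndexOffsets {
    static int rowKeyOffset(Region region) {
        byte[] start = region.getRegionInfo().getStartKey();
        byte[] end = region.getRegionInfo().getEndKey();
        return start.length != 0 ? start.length : end.length;
    }
}
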
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index fab1ad0..d40901e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -87,8 +87,8 @@ import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegion.RowLock;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
@@ -333,7 +333,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         try {
             // TODO: check that key is within region.getStartKey() and region.getEndKey()
             // and return special code to force client to lookup region from meta.
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -363,7 +363,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
+    private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region,
             long clientTimeStamp) throws IOException, SQLException {
         Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
         RegionScanner scanner = region.getScanner(scan);
@@ -646,7 +646,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, stats);
     }
 
-    private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
+    private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region,
         long clientTimeStamp) throws IOException {
         if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
             return null;
@@ -684,7 +684,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key,
         ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp)
         throws IOException, SQLException {
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
         PTable table = metaDataCache.getIfPresent(cacheKey);
         // We always cache the latest version - fault in if not in cache
@@ -723,7 +723,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         schemaName, tableName);
             byte[] parentKey = parentTableName == null ? null : lockKey;
 
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(lockKey, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -787,7 +787,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         return;
                     }
                 }
-                // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
+                // TODO: Switch this to Region#batchMutate when we want to support indexes on the
                 // system
                 // table. Basically, we get all the locks that we don't already hold for all the
                 // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
@@ -797,7 +797,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // on the system table. This is an issue because of the way we manage batch mutation
                 // in the
                 // Indexer.
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
 
                 // Invalidate the cache - the next getTable call will add it
                 // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
@@ -823,9 +824,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
 
 
-    private static void acquireLock(HRegion region, byte[] key, List<RowLock> locks)
+    private static void acquireLock(Region region, byte[] key, List<RowLock> locks)
         throws IOException {
-        RowLock rowLock = region.getRowLock(key);
+        RowLock rowLock = region.getRowLock(key, true);
         if (rowLock == null) {
             throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
         }
@@ -839,7 +840,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
      * TODO: should we pass a timestamp here?
      */
     @SuppressWarnings("deprecation")
-    private TableViewFinderResult findChildViews(HRegion region, byte[] tenantId, PTable table) throws IOException {
+    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table) throws IOException {
         byte[] schemaName = table.getSchemaName().getBytes();
         byte[] tableName = table.getTableName().getBytes();
         boolean isMultiTenant = table.isMultiTenant();
@@ -928,7 +929,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes,
                         schemaName, tableName);
 
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -952,7 +953,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 }
                 Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                 // Commit the list of deletion.
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
                 long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 for (ImmutableBytesPtr ckey : invalidateList) {
                     metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
@@ -981,7 +983,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
         long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
 
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
 
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -1107,7 +1109,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
     private static interface ColumnMutator {
       MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-          List<Mutation> tableMetadata, HRegion region,
+          List<Mutation> tableMetadata, Region region,
           List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) throws IOException,
           SQLException;
     }
@@ -1121,7 +1123,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
         try {
             byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 return result;
@@ -1207,7 +1209,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     return result;
                 }
 
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
                 // Invalidate from cache
                 for (ImmutableBytesPtr invalidateKey : invalidateList) {
                     metaDataCache.invalidate(invalidateKey);
@@ -1235,7 +1238,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
                 @Override
                 public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-                        List<Mutation> tableMetaData, HRegion region,
+                        List<Mutation> tableMetaData, Region region,
                         List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) {
                     byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                     byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
@@ -1319,14 +1322,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         // get the co-processor environment
         // TODO: check that key is within region.getStartKey() and region.getEndKey()
         // and return special code to force client to lookup region from meta.
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         /*
          * Lock directly on key, though it may be an index table. This will just prevent a table
          * from getting rebuilt too often.
          */
         final boolean wasLocked = (rowLock != null);
         if (!wasLocked) {
-            rowLock = region.getRowLock(key);
+            rowLock = region.getRowLock(key, true);
             if (rowLock == null) {
                 throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
             }
@@ -1370,7 +1373,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
                 @Override
                 public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-                        List<Mutation> tableMetaData, HRegion region,
+                        List<Mutation> tableMetaData, Region region,
                         List<ImmutableBytesPtr> invalidateList, List<RowLock> locks)
                         throws IOException, SQLException {
                     byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
@@ -1518,7 +1521,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
             tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
             byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -1542,7 +1545,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             }
             PIndexState newState =
                     PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
-            RowLock rowLock = region.getRowLock(key);
+            RowLock rowLock = region.getRowLock(key, true);
             if (rowLock == null) {
                 throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
             }
@@ -1633,7 +1636,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
                         tableMetadata.add(p);
                     }
-                    region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                    region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                        HConstants.NO_NONCE);
                     // Invalidate from cache
                     Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                     metaDataCache.invalidate(cacheKey);
@@ -1658,9 +1662,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, HRegion region) {
-        byte[] startKey = region.getStartKey();
-        byte[] endKey = region.getEndKey();
+    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) {
+        byte[] startKey = region.getRegionInfo().getStartKey();
+        byte[] endKey = region.getRegionInfo().getEndKey();
         if (Bytes.compareTo(startKey, key) <= 0
                 && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key,
                     endKey) < 0)) {

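Two Region-interface signature changes recur throughout this file: getRowLock now takes a wait flag, and the multi-row mutate call takes nonce arguments, with HConstants.NO_NONCE supplied where Phoenix has no nonce of its own. A condensed sketch of both call sites, assuming HBase 1.1 signatures; the wrapper class is illustrative:

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: the two migrated call shapes from the hunks above.
final class RegionWrites {

    static RowLock lockRow(Region region, byte[] key) throws IOException {
        RowLock lock = region.getRowLock(key, true); // true: wait for the lock
        if (lock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        return lock;
    }

    static void commit(Region region, List<Mutation> mutations) throws IOException {
        region.mutateRowsWithLocks(mutations, Collections.<byte[]> emptySet(),
            HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
}
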
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 77e124d..54c688a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.cache.GlobalCache;
@@ -176,8 +176,9 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            HRegion region = c.getEnvironment().getRegion();
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            Region region = c.getEnvironment().getRegion();
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
@@ -187,7 +188,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         Expression[] arrayFuncRefs = deserializeArrayPostionalExpressionInfoFromScan(
                 scan, innerScanner, arrayKVRefs);
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         IndexMaintainer indexMaintainer = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
@@ -231,7 +232,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
         long estSize = iterator.getEstimatedByteSize();
         final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
-        final HRegion region = c.getEnvironment().getRegion();
+        final Region region = c.getEnvironment().getRegion();
         region.startRegionOperation();
         try {
             // Once we return from the first call to next, we've run through and cached
@@ -241,7 +242,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             long actualSize = iterator.getByteSize();
             chunk.resize(actualSize);
         } catch (Throwable t) {
-            ServerUtil.throwIOException(region.getRegionNameAsString(), t);
+            ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
             return null;
         } finally {
             region.closeRegionOperation();
@@ -273,7 +274,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                     tuple = iterator.next();
                     return !isFilterDone();
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(region.getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
                     return false;
                 }
             }
@@ -288,7 +289,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                             iterator.close();
                         }
                     } catch (SQLException e) {
-                        ServerUtil.throwIOException(region.getRegionNameAsString(), e);
+                        ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), e);
                     } finally {
                         chunk.close();
                     }
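
The result-caching scanner above brackets its work with startRegionOperation and closeRegionOperation, which keeps the region open while the iterator is filled. A minimal sketch of that bracket, assuming the HBase 1.1 Region interface; the callback type is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: the region-operation bracket used by the observers.
final class RegionOps {

    interface Body {
        void run() throws IOException;
    }

    static void withRegionOperation(Region region, Body body) throws IOException {
        region.startRegionOperation();
        try {
            body.run();
        } finally {
            region.closeRegionOperation();
        }
    }
}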

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
index 7953933..9b5f040 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegion.RowLock;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -88,9 +88,9 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                         QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf)));
     }
     
-    private static void acquireLock(HRegion region, byte[] key, List<RowLock> locks)
+    private static void acquireLock(Region region, byte[] key, List<RowLock> locks)
         throws IOException {
-        RowLock rowLock = region.getRowLock(key);
+        RowLock rowLock = region.getRowLock(key, true);
         if (rowLock == null) {
             throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
         }
@@ -114,7 +114,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
         // We need to set this to prevent region.increment from being called
         e.bypass();
         e.complete();
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         byte[] row = increment.getRow();
         List<RowLock> locks = Lists.newArrayList();
         TimeRange tr = increment.getTimeRange();
@@ -251,7 +251,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                 }
                 // update the KeyValues on the server
                 Mutation[] mutations = new Mutation[]{put};
-                region.batchMutate(mutations);
+                region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
                 // return a Result with the updated KeyValues
                 return Result.create(cells);
             } finally {
@@ -345,7 +345,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
         // We need to set this to prevent region.append from being called
         e.bypass();
         e.complete();
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         byte[] row = append.getRow();
         List<RowLock> locks = Lists.newArrayList();
         region.startRegionOperation();
@@ -400,7 +400,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                     }
                 }
                 Mutation[] mutations = new Mutation[]{m};
-                region.batchMutate(mutations);
+                region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
                 long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
                 // Return result with single KeyValue. The only piece of information
                 // the client cares about is the timestamp, which is the timestamp of

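The sequence observer's writes now go through the nonce-taking batchMutate overload on the Region interface; Phoenix supplies no nonce, so NO_NONCE is passed for both group and value. A one-method sketch, assuming the HBase 1.1 signature; the wrapper class is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative only: the batchMutate call shape from the hunks above.
final class SequenceWrites {
    static void apply(Region region, Put put) throws IOException {
        Mutation[] mutations = new Mutation[] { put };
        region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
}
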
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2d6d98a..d5cc486 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -125,7 +125,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
     }
 
-    private static void commitBatch(HRegion region, List<Mutation> mutations, byte[] indexUUID) throws IOException {
+    private static void commitBatch(Region region, List<Mutation> mutations, byte[] indexUUID) throws IOException {
       if (indexUUID != null) {
           for (Mutation m : mutations) {
               m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
@@ -133,7 +133,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
       }
       Mutation[] mutationArray = new Mutation[mutations.size()];
       // TODO: should we use the one that is all or none?
-      region.batchMutate(mutations.toArray(mutationArray));
+      region.batchMutate(mutations.toArray(mutationArray), HConstants.NO_NONCE, HConstants.NO_NONCE);
     }
 
     public static void serializeIntoScan(Scan scan) {
@@ -158,7 +158,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     @Override
     protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
         int offset = 0;
-        HRegion region = c.getEnvironment().getRegion();
+        Region region = c.getEnvironment().getRegion();
         long ts = scan.getTimeRange().getMax();
         StatisticsCollector stats = null;
         if(ScanUtil.isAnalyzeTable(scan)) {
@@ -172,7 +172,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
@@ -212,7 +213,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             ptr = new ImmutableBytesWritable();
         }
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
         boolean localIndexScan = ScanUtil.isLocalIndex(scan);
@@ -279,8 +280,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                                                 results);
                                         Put put = maintainer.buildUpdateMutation(kvBuilder,
                                             valueGetter, ptr, ts,
-                                            c.getEnvironment().getRegion().getStartKey(),
-                                            c.getEnvironment().getRegion().getEndKey());
+                                            c.getEnvironment().getRegion().getRegionInfo().getStartKey(),
+                                            c.getEnvironment().getRegion().getRegionInfo().getEndKey());
                                         indexMutations.add(put);
                                     }
                                 }
@@ -391,7 +392,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                         } catch (ConstraintViolationException e) {
                             // Log and ignore in count
                             logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
-                                region.getRegionNameAsString() + " with values " +
+                                region.getRegionInfo().getRegionNameAsString() + " with values " +
                                 SchemaUtil.toString(values),
                                 ScanUtil.getCustomAnnotations(scan)), e);
                             continue;
@@ -479,9 +480,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     }
 
     private void commitIndexMutations(final ObserverContext<RegionCoprocessorEnvironment> c,
-            HRegion region, List<Mutation> indexMutations) throws IOException {
+            Region region, List<Mutation> indexMutations) throws IOException {
         // Get indexRegion corresponding to data region
-        HRegion indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
+        Region indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
         if (indexRegion != null) {
             commitBatch(indexRegion, indexMutations, null);
         } else {
@@ -493,7 +494,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 table = c.getEnvironment().getTable(indexTable);
                 table.batch(indexMutations);
             } catch (InterruptedException ie) {
-                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(),
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(),
                     ie);
             } finally {
                 if (table != null) table.close();
@@ -534,9 +535,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
 
 
     @Override
-    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r)
+    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, Region l, Region r)
             throws IOException {
-        HRegion region = e.getEnvironment().getRegion();
+        Region region = e.getEnvironment().getRegion();
         TableName table = region.getRegionInfo().getTable();
         StatisticsCollector stats = null;
         try {

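The same migration reroutes row-key and region-name lookups through HRegionInfo, since the Region interface does not expose them directly. A hedged sketch of the accessor mapping applied throughout the hunks above (the method is illustrative):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.util.Bytes;

    static String describe(Region region) {
        HRegionInfo info = region.getRegionInfo();
        // was region.getRegionNameAsString() / getStartKey() / getEndKey()
        return info.getRegionNameAsString()
            + " [" + Bytes.toStringBinary(info.getStartKey())
            + ", " + Bytes.toStringBinary(info.getEndKey()) + ")";
    }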
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 71cc1d6..549fe8c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -24,12 +24,11 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -60,7 +59,7 @@ public class LocalTable implements LocalHBaseState {
     Scan s = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columns));
     s.setStartRow(row);
     s.setStopRow(row);
-    HRegion region = this.env.getRegion();
+    Region region = this.env.getRegion();
     RegionScanner scanner = region.getScanner(s);
     List<Cell> kvs = new ArrayList<Cell>(1);
     boolean more = scanner.next(kvs);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index f72dec0..56bf637 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -21,11 +21,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure;
 import org.apache.phoenix.hbase.index.parallel.QuickFailingTaskRunner;
@@ -150,10 +151,11 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                         // as well.
                         try {
                             if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
-                                HRegion indexRegion = IndexUtil.getIndexRegion(env);
+                                Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();
-                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
+                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
+                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                     return null;
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
index 4d5f667..26da2d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
@@ -22,7 +22,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -32,8 +32,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 
 public class PerRegionIndexWriteCache {
 
-  private Map<HRegion, Multimap<HTableInterfaceReference, Mutation>> cache =
-      new HashMap<HRegion, Multimap<HTableInterfaceReference, Mutation>>();
+  private Map<Region, Multimap<HTableInterfaceReference, Mutation>> cache =
+      new HashMap<Region, Multimap<HTableInterfaceReference, Mutation>>();
 
 
   /**
@@ -43,7 +43,7 @@ public class PerRegionIndexWriteCache {
    * @return Get the edits for the given region. Returns <tt>null</tt> if there are no pending edits
    *         for the region
    */
-  public Multimap<HTableInterfaceReference, Mutation> getEdits(HRegion region) {
+  public Multimap<HTableInterfaceReference, Mutation> getEdits(Region region) {
     return cache.remove(region);
   }
 
@@ -52,7 +52,7 @@ public class PerRegionIndexWriteCache {
    * @param table
    * @param collection
    */
-  public void addEdits(HRegion region, HTableInterfaceReference table,
+  public void addEdits(Region region, HTableInterfaceReference table,
       Collection<Mutation> collection) {
     Multimap<HTableInterfaceReference, Mutation> edits = cache.get(region);
     if (edits == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
index f36affb..189f970 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 
 import com.google.common.collect.Multimap;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
@@ -41,7 +41,7 @@ public class StoreFailuresInCachePolicy implements IndexFailurePolicy {
 
   private KillServerOnFailurePolicy delegate;
   private PerRegionIndexWriteCache cache;
-  private HRegion region;
+  private Region region;
 
   /**
    * @param failedIndexEdits cache to update when we find a failure

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 9171b53..b1b2656 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -23,11 +23,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.phoenix.hbase.index.CapturingAbortable;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
@@ -154,10 +155,11 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                             // index is pretty hacky. If we're going to keep this, we should revisit that
                             // as well.
                             if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
-                                HRegion indexRegion = IndexUtil.getIndexRegion(env);
+                                Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();
-                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
+                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
+                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                     return Boolean.TRUE;
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index b5e6a63..7a45e21 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
@@ -73,7 +73,7 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
         ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN);
         scanRanges.initializeScan(scan);
         scan.setFilter(scanRanges.getSkipScanFilter());
-        HRegion region = this.env.getRegion();
+        Region region = this.env.getRegion();
         RegionScanner scanner = region.getScanner(scan);
         // Run through the scanner using internal nextRaw method
         region.startRegionOperation();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index 99e26d1..222aefb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -24,9 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
@@ -166,14 +164,14 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
             Mutation mutation = null;
             if (upsert) {
                 mutation =
-                        maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, state
-                                .getCurrentTimestamp(), env.getRegion().getStartKey(), env
-                                .getRegion().getEndKey());
+                        maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, state.getCurrentTimestamp(),
+                            env.getRegion().getRegionInfo().getStartKey(),
+                            env.getRegion().getRegionInfo().getEndKey());
             } else {
                 mutation =
-                        maintainer.buildDeleteMutation(kvBuilder, valueGetter, ptr, state
-                                .getPendingUpdate(), state.getCurrentTimestamp(), env.getRegion()
-                                .getStartKey(), env.getRegion().getEndKey());
+                        maintainer.buildDeleteMutation(kvBuilder, valueGetter, ptr, state.getPendingUpdate(),
+                            state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(),
+                            env.getRegion().getRegionInfo().getEndKey());
             }
             indexUpdate.setUpdate(mutation);
             if (scanner != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index d6f25c4..1b1985f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -106,7 +106,7 @@ public class StatisticsCollector {
         this.statsTable.close();
     }
 
-    public void updateStatistic(HRegion region) {
+    public void updateStatistic(Region region) {
         try {
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             writeStatsToStatsTable(region, true, mutations, TimeKeeper.SYSTEM.getCurrentTime());
@@ -121,7 +121,7 @@ public class StatisticsCollector {
         }
     }
     
-    private void writeStatsToStatsTable(final HRegion region,
+    private void writeStatsToStatsTable(final Region region,
             boolean delete, List<Mutation> mutations, long currentTime) throws IOException {
         try {
             // update the statistics table
@@ -202,7 +202,7 @@ public class StatisticsCollector {
         }
     }
 
-    public InternalScanner createCompactionScanner(HRegion region, Store store, InternalScanner s) throws IOException {
+    public InternalScanner createCompactionScanner(Region region, Store store, InternalScanner s) throws IOException {
         // See if this is for Major compaction
         if (logger.isDebugEnabled()) {
             logger.debug("Compaction scanner created for stats");
@@ -211,13 +211,13 @@ public class StatisticsCollector {
         return getInternalScanner(region, store, s, cfKey);
     }
 
-    public void splitStats(HRegion parent, HRegion left, HRegion right) {
+    public void splitStats(Region parent, Region left, Region right) {
         try {
             if (logger.isDebugEnabled()) {
                 logger.debug("Collecting stats for split of " + parent.getRegionInfo() + " into " + left.getRegionInfo() + " and " + right.getRegionInfo());
             }
             List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
-            for (byte[] fam : parent.getStores().keySet()) {
+            for (byte[] fam : parent.getTableDesc().getFamiliesKeys()) {
             	statsTable.splitStats(parent, left, right, this, new ImmutableBytesPtr(fam), mutations);
             }
             if (logger.isDebugEnabled()) {
@@ -230,7 +230,7 @@ public class StatisticsCollector {
         }
     }
 
-    protected InternalScanner getInternalScanner(HRegion region, Store store,
+    protected InternalScanner getInternalScanner(Region region, Store store,
             InternalScanner internalScan, ImmutableBytesPtr family) {
         return new StatisticsScanner(this, statsTable, region, internalScan, family);
     }

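One nuance in the splitStats hunk: Region does not expose the byte[]-keyed store map that HRegion.getStores() used to return, so the column family names are read from the table descriptor instead. A minimal sketch of that substitution (the per-family stats write is elided):

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.Region;

    static void forEachFamily(Region region) {
        HTableDescriptor desc = region.getTableDesc();
        for (byte[] family : desc.getFamiliesKeys()) {
            // collect split stats for this column family, as splitStats does
        }
    }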
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 0e50923..47a7f29 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -26,8 +26,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
@@ -38,11 +38,11 @@ public class StatisticsScanner implements InternalScanner {
     private static final Log LOG = LogFactory.getLog(StatisticsScanner.class);
     private InternalScanner delegate;
     private StatisticsWriter stats;
-    private HRegion region;
+    private Region region;
     private StatisticsCollector tracker;
     private ImmutableBytesPtr family;
 
-    public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, HRegion region,
+    public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, Region region,
             InternalScanner delegate, ImmutableBytesPtr family) {
         this.tracker = tracker;
         this.stats = stats;
@@ -85,17 +85,17 @@ public class StatisticsScanner implements InternalScanner {
             // Just verify that this is fine
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Deleting the stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Deleting the stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
-            stats.deleteStats(region.getRegionName(), this.tracker, family, mutations);
+            stats.deleteStats(region.getRegionInfo().getRegionName(), this.tracker, family, mutations);
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Adding new stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Adding new stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
-            stats.addStats(region.getRegionName(), this.tracker, family, mutations);
+            stats.addStats(region.getRegionInfo().getRegionName(), this.tracker, family, mutations);
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Committing new stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Committing new stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
             stats.commitStats(mutations);


[26/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ap...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4b4e0e2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4b4e0e2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4b4e0e2

Branch: refs/heads/master
Commit: a4b4e0e2d862d5d4ee0f3a6f9587f53fe87d629f
Parents: c83ab9e
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:53:53 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}


[09/31] phoenix git commit: PHOENIX-1653 Support separate clusters for MR jobs

Posted by ap...@apache.org.
PHOENIX-1653 Support separate clusters for MR jobs

Add support for pointing the input and output formats of a Phoenix MapReduce
job at separate clusters via override configuration settings. Defaults to the
existing behavior (HConstants.ZOOKEEPER_QUORUM).


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7de8ee1e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7de8ee1e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7de8ee1e

Branch: refs/heads/4.x-HBase-1.x
Commit: 7de8ee1e914f5e0008ca9d983869757e4ca92b78
Parents: f4180fa
Author: gjacoby <gj...@salesforce.com>
Authored: Fri Feb 27 16:49:14 2015 -0800
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Tue Mar 24 20:07:52 2015 +0100

----------------------------------------------------------------------
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 15 ++--
 .../phoenix/mapreduce/PhoenixRecordWriter.java  |  2 +-
 .../phoenix/mapreduce/index/IndexTool.java      |  2 +-
 .../index/PhoenixIndexImportMapper.java         |  2 +-
 .../phoenix/mapreduce/util/ConnectionUtil.java  | 88 ++++++++++++++------
 .../util/PhoenixConfigurationUtil.java          | 72 ++++++++++++++--
 .../mapreduce/util/PhoenixMapReduceUtil.java    | 22 ++++-
 .../util/PhoenixConfigurationUtilTest.java      | 60 ++++++++++++-
 .../pig/util/QuerySchemaParserFunction.java     |  2 +-
 .../pig/util/SqlQueryToColumnInfoFunction.java  |  2 +-
 10 files changed, 219 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index a83b9ae..31759b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -98,15 +98,16 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
      * @throws IOException
      * @throws SQLException
      */
-    private QueryPlan getQueryPlan(final JobContext context,final Configuration configuration) throws IOException {
+    private QueryPlan getQueryPlan(final JobContext context, final Configuration configuration)
+            throws IOException {
         Preconditions.checkNotNull(context);
-        try{
+        try {
             final String currentScnValue = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE);
             final Properties overridingProps = new Properties();
             if(currentScnValue != null) {
                 overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue);
             }
-            final Connection connection = ConnectionUtil.getConnection(configuration,overridingProps);
+            final Connection connection = ConnectionUtil.getInputConnection(configuration, overridingProps);
             final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration);
             Preconditions.checkNotNull(selectStatement);
             final Statement statement = connection.createStatement();
@@ -116,9 +117,11 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
             // Initialize the query plan so it sets up the parallel scans
             queryPlan.iterator();
             return queryPlan;
-        } catch(Exception exception) {
-            LOG.error(String.format("Failed to get the query plan with error [%s]",exception.getMessage()));
+        } catch (Exception exception) {
+            LOG.error(String.format("Failed to get the query plan with error [%s]",
+                exception.getMessage()));
             throw new RuntimeException(exception);
         }
-   }
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
index 4d26bf4..5843076 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
@@ -46,7 +46,7 @@ public class PhoenixRecordWriter<T extends DBWritable>  extends RecordWriter<Nul
     private long numRecords = 0;
     
     public PhoenixRecordWriter(final Configuration configuration) throws SQLException {
-        this.conn = ConnectionUtil.getConnection(configuration);
+        this.conn = ConnectionUtil.getOutputConnection(configuration);
         this.batchSize = PhoenixConfigurationUtil.getBatchSize(configuration);
         final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration);
         this.statement = this.conn.prepareStatement(upsertQuery);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index d93ef9c..300f575 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -163,7 +163,7 @@ public class IndexTool extends Configured implements Tool {
             final String qDataTable = SchemaUtil.getTableName(schemaName, dataTable);
             final String qIndexTable = SchemaUtil.getTableName(schemaName, indexTable);
          
-            connection = ConnectionUtil.getConnection(configuration);
+            connection = ConnectionUtil.getInputConnection(configuration);
             if(!isValidIndexTable(connection, dataTable, indexTable)) {
                 throw new IllegalArgumentException(String.format(" %s is not an index table for %s ",qIndexTable,qDataTable));
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
index 7bf4bfc..30f6dc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
@@ -73,7 +73,7 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
             indexTableName = PhoenixConfigurationUtil.getOutputTableName(configuration);
             final Properties overrideProps = new Properties ();
             overrideProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE));
-            connection = ConnectionUtil.getConnection(configuration,overrideProps);
+            connection = ConnectionUtil.getOutputConnection(configuration,overrideProps);
             connection.setAutoCommit(false);
             final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration);
             this.pStatement = connection.prepareStatement(upsertQuery);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
index 3234967..e677104 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
@@ -24,49 +24,89 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Properties;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.util.QueryUtil;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Utility class to return a {@link Connection} .
  */
 public class ConnectionUtil {
+
+
+    /**
+     * Retrieve the configured input Connection.
+     *
+     * @param conf configuration containing connection information
+     * @return the configured input connection
+     */
+    public static Connection getInputConnection(final Configuration conf) throws SQLException {
+        return getInputConnection(conf, new Properties());
+    }
     
     /**
-     * Returns the {@link Connection} from Configuration
-     * @param configuration
-     * @return
-     * @throws SQLException
+     * Retrieve the configured input Connection.
+     *
+     * @param conf configuration containing connection information
+     * @param props custom connection properties
+     * @return the configured input connection
+     */
+    public static Connection getInputConnection(final Configuration conf, final Properties props) throws SQLException {
+        Preconditions.checkNotNull(conf);
+        return getConnection(PhoenixConfigurationUtil.getInputCluster(conf),
+                extractProperties(props, conf));
+    }
+
+    /**
+     * Create the configured output Connection.
+     *
+     * @param conf configuration containing the connection information
+     * @return the configured output connection
      */
-    public static Connection getConnection(final Configuration configuration) throws SQLException {
-        return getConnection(configuration, null);
+    public static Connection getOutputConnection(final Configuration conf) throws SQLException {
+        return getOutputConnection(conf, new Properties());
     }
     
     /**
-     * Used primarily in cases where we need to pass few additional/overriding properties 
-     * @param configuration
-     * @param properties
-     * @return
-     * @throws SQLException
+     * Create the configured output Connection.
+     *
+     * @param conf configuration containing the connection information
+     * @param props custom connection properties
+     * @return the configured output connection
+     */
+    public static Connection getOutputConnection(final Configuration conf, Properties props) throws SQLException {
+        Preconditions.checkNotNull(conf);
+        return getConnection(PhoenixConfigurationUtil.getOutputCluster(conf),
+                extractProperties(props, conf));
+    }
+
+    /**
+     * Returns the {@link Connection} from a ZooKeeper cluster string.
+     *
+     * @param quorum a ZooKeeper quorum connection string
+     * @return a Phoenix connection to the given connection string
      */
-    public static Connection getConnection(final Configuration configuration , final Properties properties) throws SQLException {
-        Preconditions.checkNotNull(configuration);
-        final Properties props = new Properties();
-        Iterator<Map.Entry<String, String>> iterator = configuration.iterator();
+    private static Connection getConnection(final String quorum, Properties props) throws SQLException {
+        Preconditions.checkNotNull(quorum);
+        return DriverManager.getConnection(QueryUtil.getUrl(quorum), props);
+    }
+
+    /**
+     * Add properties from the given Configuration to the provided Properties.
+     *
+     * @param props properties to which connection information from the Configuration will be added
+     * @param conf configuration containing connection information
+     * @return the input Properties value, with additional connection information from the
+     * given Configuration
+     */
+    private static Properties extractProperties(Properties props, final Configuration conf) {
+        Iterator<Map.Entry<String, String>> iterator = conf.iterator();
         if(iterator != null) {
             while (iterator.hasNext()) {
                 Map.Entry<String, String> entry = iterator.next();
                 props.setProperty(entry.getKey(), entry.getValue());
             }
         }
-        if(properties != null && !properties.isEmpty()) {
-            props.putAll(properties);
-        }
-        final Connection conn = DriverManager.getConnection(QueryUtil.getUrl(configuration.get(HConstants.ZOOKEEPER_QUORUM)), props);
-        return conn;
+        return props;
     }
-
 }

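A hedged usage sketch of the split API above, as it might appear in a driver method; the SCN value is a placeholder, and both connections fall back to the plain HConstants.ZOOKEEPER_QUORUM setting when no per-direction quorum override is configured:

    import java.sql.Connection;
    import java.util.Properties;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.mapreduce.util.ConnectionUtil;
    import org.apache.phoenix.util.PhoenixRuntime;

    Configuration conf = HBaseConfiguration.create();
    Properties overrides = new Properties();
    overrides.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, "1432150000000"); // placeholder SCN
    // Reads resolve against the input cluster, writes against the output cluster.
    Connection in = ConnectionUtil.getInputConnection(conf, overrides);
    Connection out = ConnectionUtil.getOutputConnection(conf);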
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index b8b64b2..6e0e5e4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -17,18 +17,21 @@
  */
 package org.apache.phoenix.mapreduce.util;
 
-import static org.apache.commons.lang.StringUtils.isNotEmpty;
-
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable;
@@ -42,10 +45,7 @@ import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Splitter;
-import com.google.common.collect.Lists;
+import static org.apache.commons.lang.StringUtils.isNotEmpty;
 
 /**
  * A utility class to set properties on the {#link Configuration} instance.
@@ -90,7 +90,11 @@ public final class PhoenixConfigurationUtil {
     
     /** Configuration key for the class name of an ImportPreUpsertKeyValueProcessor */
     public static final String UPSERT_HOOK_CLASS_CONFKEY = "phoenix.mapreduce.import.kvprocessor";
+
+    public static final String MAPREDUCE_INPUT_CLUSTER_QUORUM = "phoenix.mapreduce.input.cluster.quorum";
     
+    public static final String MAPREDUCE_OUTPUT_CLUSTER_QUORUM = "phoenix.mapreduce.output.cluster.quorum";
+
     public enum SchemaType {
         TABLE,
         QUERY;
@@ -165,6 +169,28 @@ public final class PhoenixConfigurationUtil {
         configuration.setLong(UPSERT_BATCH_SIZE, batchSize);
     }
     
+    /**
+     * Sets which HBase cluster a Phoenix MapReduce job should read from
+     * @param configuration
+     * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will read from
+     */
+    public static void setInputCluster(final Configuration configuration,
+            final String quorum) {
+        Preconditions.checkNotNull(configuration);
+        configuration.set(MAPREDUCE_INPUT_CLUSTER_QUORUM, quorum);
+    }
+
+    /**
+     * Sets which HBase cluster a Phoenix MapReduce job should write to
+     * @param configuration
+     * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will write to
+     */
+    public static void setOutputCluster(final Configuration configuration,
+            final String quorum) {
+        Preconditions.checkNotNull(configuration);
+        configuration.set(MAPREDUCE_OUTPUT_CLUSTER_QUORUM, quorum);
+    }
+        
     public static Class<?> getInputClass(final Configuration configuration) {
         return configuration.getClass(INPUT_CLASS, NullDBWritable.class);
     }
@@ -182,7 +208,7 @@ public final class PhoenixConfigurationUtil {
         if(isNotEmpty(columnInfoStr)) {
             return ColumnInfoToStringEncoderDecoder.decode(columnInfoStr);
         }
-        final Connection connection = ConnectionUtil.getConnection(configuration);
+        final Connection connection = ConnectionUtil.getOutputConnection(configuration);
         String upsertColumns = configuration.get(UPSERT_COLUMNS);
         List<String> upsertColumnList = null;
         if(isNotEmpty(upsertColumns)) {
@@ -232,7 +258,7 @@ public final class PhoenixConfigurationUtil {
         }
         final String tableName = getInputTableName(configuration);
         Preconditions.checkNotNull(tableName);
-        final Connection connection = ConnectionUtil.getConnection(configuration);
+        final Connection connection = ConnectionUtil.getInputConnection(configuration);
         final List<String> selectColumnList = getSelectColumnList(configuration);
         final List<ColumnInfo> columnMetadataList = PhoenixRuntime.generateColumnInfo(connection, tableName, selectColumnList);
         final String encodedColumnInfos = ColumnInfoToStringEncoderDecoder.encode(columnMetadataList);
@@ -276,7 +302,7 @@ public final class PhoenixConfigurationUtil {
         Preconditions.checkNotNull(configuration);
         long batchSize = configuration.getLong(UPSERT_BATCH_SIZE, DEFAULT_UPSERT_BATCH_SIZE);
         if(batchSize <= 0) {
-           Connection conn = ConnectionUtil.getConnection(configuration);
+           Connection conn = ConnectionUtil.getOutputConnection(configuration);
            batchSize = ((PhoenixConnection) conn).getMutateBatchSize();
            conn.close();
         }
@@ -309,6 +335,34 @@ public final class PhoenixConfigurationUtil {
         Preconditions.checkNotNull(configuration);
         return configuration.get(OUTPUT_TABLE_NAME);
     }
+    
+    /**
+     * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will read from
+     * @param configuration
+     * @return ZooKeeper quorum string
+     */
+    public static String getInputCluster(final Configuration configuration) {
+        Preconditions.checkNotNull(configuration);
+        String quorum = configuration.get(MAPREDUCE_INPUT_CLUSTER_QUORUM);
+        if (quorum == null) {
+            quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM);
+        }
+        return quorum;
+    }
+
+    /**
+     * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will write to
+     * @param configuration
+     * @return ZooKeeper quorum string
+     */
+    public static String getOutputCluster(final Configuration configuration) {
+        Preconditions.checkNotNull(configuration);
+        String quorum = configuration.get(MAPREDUCE_OUTPUT_CLUSTER_QUORUM);
+        if (quorum == null) {
+            quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM);
+        }
+        return quorum;
+    }
 
     public static void loadHBaseConfiguration(Job job) throws IOException {
         // load hbase-site.xml

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
index f1a7f5a..74d39bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java
@@ -49,7 +49,7 @@ public final class PhoenixMapReduceUtil {
           PhoenixConfigurationUtil.setInputClass(configuration,inputClass);
           PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.TABLE);
     }
-    
+       
     /**
      * 
      * @param job         
@@ -64,9 +64,19 @@ public final class PhoenixMapReduceUtil {
           PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery);
           PhoenixConfigurationUtil.setInputClass(configuration,inputClass);
           PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY);
+          
      }
     
     /**
+     * A method to override which HBase cluster {@link PhoenixInputFormat} will read from
+     * @param job MapReduce Job
+     * @param quorum an HBase cluster's ZooKeeper quorum
+     */
+    public static void setInputCluster(final Job job, final String quorum) {
+        final Configuration configuration = job.getConfiguration();
+        PhoenixConfigurationUtil.setInputCluster(configuration, quorum);
+    }
+    /**
      * 
      * @param job
      * @param outputClass  
@@ -94,6 +104,16 @@ public final class PhoenixMapReduceUtil {
           PhoenixConfigurationUtil.setOutputTableName(configuration, tableName);
           PhoenixConfigurationUtil.setUpsertColumnNames(configuration,fieldNames);
     }
+    
+    /**
+     * A method to override which HBase cluster {@link PhoenixOutputFormat} will write to
+     * @param job MapReduce Job
+     * @param quorum an HBase cluster's ZooKeeper quorum
+     */
+    public static void setOutputCluster(final Job job, final String quorum) {
+        final Configuration configuration = job.getConfiguration();
+        PhoenixConfigurationUtil.setOutputCluster(configuration, quorum);
+    }
 
     
 }

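The two setters added above are the job-level entry points for cross-cluster jobs. Here is a short sketch of how a driver might call them, with placeholder quorum strings and the rest of the input/output wiring elided (it would use the existing setInput/setOutput helpers in this class):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;

public class CrossClusterDriverSketch {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "phoenix-cross-cluster-copy");
        // Placeholder quorums: read from one HBase cluster, write to another.
        PhoenixMapReduceUtil.setInputCluster(job, "zk-src1,zk-src2,zk-src3:2181");
        PhoenixMapReduceUtil.setOutputCluster(job, "zk-dst1,zk-dst2,zk-dst3:2181");
        // Table, columns, and the input/output formats would be configured here
        // via the setInput/setOutput methods shown earlier in this file.
    }
}

If neither setter is called, both sides fall back to hbase.zookeeper.quorum, so existing single-cluster jobs are unaffected.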
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java
index 33c7531..f8f2a63 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java
@@ -23,13 +23,12 @@ import static org.junit.Assert.assertEquals;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
-import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
@@ -39,7 +38,8 @@ import org.junit.Test;
  * Test for {@link PhoenixConfigurationUtil}
  */
 public class PhoenixConfigurationUtilTest extends BaseConnectionlessQueryTest {
-    
+    private static final String ORIGINAL_CLUSTER_QUORUM = "myzookeeperhost";
+    private static final String OVERRIDE_CLUSTER_QUORUM = "myoverridezookeeperhost";
     @Test
     public void testUpsertStatement() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
@@ -121,4 +121,58 @@ public class PhoenixConfigurationUtilTest extends BaseConnectionlessQueryTest {
             conn.close();
         }
     }
+    
+    @Test
+    public void testInputClusterOverride() throws Exception {
+        final Configuration configuration = new Configuration();
+        configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM);
+        String zkQuorum = PhoenixConfigurationUtil.getInputCluster(configuration);
+        assertEquals(ORIGINAL_CLUSTER_QUORUM, zkQuorum);
+
+        configuration.set(PhoenixConfigurationUtil.MAPREDUCE_INPUT_CLUSTER_QUORUM,
+            OVERRIDE_CLUSTER_QUORUM);
+        String zkQuorumOverride = PhoenixConfigurationUtil.getInputCluster(configuration);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride);
+
+        final Configuration configuration2 = new Configuration();
+        PhoenixConfigurationUtil.setInputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM);
+        String zkQuorumOverride2 =
+                PhoenixConfigurationUtil.getInputCluster(configuration2);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride2);
+
+        final Job job = Job.getInstance();
+        PhoenixMapReduceUtil.setInputCluster(job, OVERRIDE_CLUSTER_QUORUM);
+        Configuration configuration3 = job.getConfiguration();
+        String zkQuorumOverride3 =
+                PhoenixConfigurationUtil.getInputCluster(configuration3);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride3);
+
+    }
+
+    @Test
+    public void testOutputClusterOverride() throws Exception {
+        final Configuration configuration = new Configuration();
+        configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM);
+        String zkQuorum = PhoenixConfigurationUtil.getOutputCluster(configuration);
+        assertEquals(ORIGINAL_CLUSTER_QUORUM, zkQuorum);
+
+        configuration.set(PhoenixConfigurationUtil.MAPREDUCE_OUTPUT_CLUSTER_QUORUM,
+            OVERRIDE_CLUSTER_QUORUM);
+        String zkQuorumOverride = PhoenixConfigurationUtil.getOutputCluster(configuration);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride);
+
+        final Configuration configuration2 = new Configuration();
+        PhoenixConfigurationUtil.setOutputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM);
+        String zkQuorumOverride2 =
+                PhoenixConfigurationUtil.getOutputCluster(configuration2);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride2);
+
+        final Job job = Job.getInstance();
+        PhoenixMapReduceUtil.setOutputCluster(job, OVERRIDE_CLUSTER_QUORUM);
+        Configuration configuration3 = job.getConfiguration();
+        String zkQuorumOverride3 =
+                PhoenixConfigurationUtil.getOutputCluster(configuration3);
+        assertEquals(OVERRIDE_CLUSTER_QUORUM, zkQuorumOverride3);
+
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
index f0148a6..4f43811 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/QuerySchemaParserFunction.java
@@ -59,7 +59,7 @@ public class QuerySchemaParserFunction implements Function<String,Pair<String,St
         Preconditions.checkArgument(!selectStatement.isEmpty(), "Select Query is empty!!");
         Connection connection = null;
         try {
-            connection = ConnectionUtil.getConnection(this.configuration);
+            connection = ConnectionUtil.getInputConnection(this.configuration);
             final Statement  statement = connection.createStatement();
             final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
             final QueryPlan queryPlan = pstmt.compileQuery(selectStatement);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
index 3ed35bb..2ea2c06 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/SqlQueryToColumnInfoFunction.java
@@ -52,7 +52,7 @@ public final class SqlQueryToColumnInfoFunction implements Function<String,List<
         Connection connection = null;
         List<ColumnInfo> columnInfos = null;
         try {
-            connection = ConnectionUtil.getConnection(this.configuration);
+            connection = ConnectionUtil.getInputConnection(this.configuration);
             final Statement  statement = connection.createStatement();
             final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
             final QueryPlan queryPlan = pstmt.compileQuery(sqlQuery);


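Both pig helpers in this hunk and the previous one follow the same compile-only pattern: obtain a read-side connection (which now honors the input-cluster override), unwrap the Phoenix statement, and compile the SELECT without executing it. A condensed sketch under those assumptions follows; the import path for ConnectionUtil assumes the phoenix-core mapreduce util package, which this diff does not show explicitly.

import java.sql.Connection;
import java.sql.Statement;
import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.mapreduce.util.ConnectionUtil;

public class CompileOnlySketch {
    // Compiles (but does not execute) a SELECT, as the pig schema helpers above do.
    static QueryPlan compile(Configuration conf, String selectStatement) throws Exception {
        Connection connection = null;
        try {
            connection = ConnectionUtil.getInputConnection(conf);
            Statement statement = connection.createStatement();
            PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
            return pstmt.compileQuery(selectStatement);
        } finally {
            if (connection != null) connection.close();
        }
    }
}

Compiling rather than executing keeps schema derivation cheap: the helpers only need the projected column metadata from the QueryPlan, never the rows.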
[14/31] phoenix git commit: Fix IndexExpressionIT test failures

Posted by ap...@apache.org.
Fix IndexExpressionIT test failures


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ea426ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ea426ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ea426ce

Branch: refs/heads/4.x-HBase-1.x
Commit: 8ea426ceb55d29c3c4f06489cdd0a6b87d69d68c
Parents: 4d71610
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Thu Mar 26 12:45:20 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Thu Mar 26 13:06:51 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/index/IndexExpressionIT.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ea426ce/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 0203e35..1a5fbcc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -480,7 +480,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String expectedPlan = "CLIENT PARALLEL 1-WAY "
                     + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName + " [-32768]"
                             : "FULL SCAN OVER INDEX_TEST.IDX")
-                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + B.INT_COL2))]\nCLIENT MERGE SORT";
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\"(A.INT_COL1 + B.INT_COL2)\")]\nCLIENT MERGE SORT";
             assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
             rs = conn.createStatement().executeQuery(groupBySql);
             assertTrue(rs.next());
@@ -531,7 +531,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String expectedPlan = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
                     + (localIndex ? "_LOCAL_IDX_" + fullDataTableName + " [-32768,0] - [-32768,*]"
                             : "INDEX_TEST.IDX [0] - [*]")
-                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + 1))]\nCLIENT MERGE SORT";
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\"(A.INT_COL1 + 1)\")]\nCLIENT MERGE SORT";
             assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
             rs = conn.createStatement().executeQuery(sql);
             assertTrue(rs.next());