Posted to commits@phoenix.apache.org by ra...@apache.org on 2017/11/14 04:30:33 UTC

[1/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs wherever possible (Rajeshbabu)

Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 0454e4211 -> c82cc18d8


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
index 3a6de6a..56080f8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
@@ -25,7 +25,9 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -90,8 +92,7 @@ public class TestApplyAndFilterDeletesFilter {
     byte[] laterFamily = Bytes.toBytes("zfamily");
     filter = new ApplyAndFilterDeletesFilter(asSet(laterFamily));
     assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
-    @SuppressWarnings("deprecation")
-    KeyValue expected = KeyValue.createFirstOnRow(kv.getRow(), laterFamily, new byte[0]);
+    KeyValue expected = KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]);
     assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
       filter.filterKeyValue(next));
     assertEquals("Didn't get correct next key with a next family", expected,
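
The pattern in this hunk recurs throughout the commit: the deprecated KeyValue.createFirstOnRow(...) and KeyValue.getRow() are replaced by KeyValueUtil.createFirstOnRow(...) and CellUtil.cloneRow(...). A minimal self-contained sketch of the replacement (class and variable names here are illustrative, not from the patch):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FirstOnRowExample {
        public static void main(String[] args) {
            Cell kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
                    Bytes.toBytes("q"), Bytes.toBytes("v"));
            byte[] laterFamily = Bytes.toBytes("zfamily");
            // Old (deprecated): KeyValue.createFirstOnRow(kv.getRow(), laterFamily, new byte[0])
            // New: clone the row via CellUtil, then build the seek hint via KeyValueUtil.
            KeyValue expected =
                    KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]);
            System.out.println(expected);
        }
    }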

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
index dbf67fc..0204cd1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java
@@ -34,8 +34,9 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -123,14 +124,14 @@ public class IndexMaintainerTest  extends BaseConnectionlessQueryTest {
                 stmt.setObject(i+1, values[i]);
             }
             stmt.execute();
-            	Iterator<Pair<byte[],List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-            List<KeyValue> dataKeyValues = iterator.next().getSecond();
+            	Iterator<Pair<byte[],List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+            List<Cell> dataKeyValues = iterator.next().getSecond();
             Map<ColumnReference,byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
-            byte[] row = dataKeyValues.get(0).getRow();
-			ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row);
-            Put dataMutation = new Put(rowKeyPtr.copyBytes());
-            for (KeyValue kv : dataKeyValues) {
-                valueMap.put(new ColumnReference(kv.getFamily(),kv.getQualifier()), kv.getValue());
+			ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRowArray(), dataKeyValues.get(0).getRowOffset(), dataKeyValues.get(0).getRowLength());
+            byte[] row = rowKeyPtr.copyBytes();
+            Put dataMutation = new Put(row);
+            for (Cell kv : dataKeyValues) {
+                valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), CellUtil.cloneValue(kv));
                 dataMutation.add(kv);
             }
             ValueGetter valueGetter = newValueGetter(row, valueMap);
@@ -148,7 +149,7 @@ public class IndexMaintainerTest  extends BaseConnectionlessQueryTest {
                 valueMap.get(ref);
             }
             byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null);
-            assertArrayEquals(dataRowKey, dataKeyValues.get(0).getRow());
+            assertArrayEquals(dataRowKey, CellUtil.cloneRow(dataKeyValues.get(0)));
         } finally {
             try {
                 conn.createStatement().execute("DROP TABLE " + fullTableName);
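
This hunk shows the two replacement styles for the removed KeyValue getters (getRow(), getFamily(), getQualifier(), getValue()): the array/offset/length triple when a copy can be avoided, and CellUtil.clone* when the caller needs its own byte[]. A hedged sketch of both styles, with illustrative helper names:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

    public class CellAccessExample {
        // Zero-copy style: point at the cell's backing array without materializing the row.
        static ImmutableBytesWritable rowKeyPtr(Cell c) {
            return new ImmutableBytesWritable(c.getRowArray(), c.getRowOffset(), c.getRowLength());
        }

        // Copying style: allocate a fresh byte[] when the caller must own the bytes.
        static byte[] valueCopy(Cell c) {
            return CellUtil.cloneValue(c);
        }
    }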

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java
index 33c72a8..761aa23 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java
@@ -27,7 +27,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.junit.Test;
 
@@ -94,7 +94,7 @@ public class CsvBulkImportUtilTest {
     public static class MockProcessor implements ImportPreUpsertKeyValueProcessor {
 
         @Override
-        public List<KeyValue> preUpsert(byte[] rowKey, List<KeyValue> keyValues) {
+        public List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues) {
             throw new UnsupportedOperationException("Not yet implemented");
         }
     }
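
Implementors of ImportPreUpsertKeyValueProcessor now consume and produce Cells instead of KeyValues. A minimal pass-through implementation, assuming the single two-argument method shown above is the whole interface:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;

    public class PassThroughProcessor implements ImportPreUpsertKeyValueProcessor {
        @Override
        public List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues) {
            // No transformation; return the cells for the row unchanged.
            return keyValues;
        }
    }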

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java
index 6424976..59a5edc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.mapreduce;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.schema.types.PInteger;
@@ -95,7 +96,7 @@ public class FormatToBytesWritableMapperTest {
 
     static class MockUpsertProcessor implements ImportPreUpsertKeyValueProcessor {
         @Override
-        public List<KeyValue> preUpsert(byte[] rowKey, List<KeyValue> keyValues) {
+        public List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues) {
             throw new UnsupportedOperationException("Not yet implemented");
         }
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
index 4571115..61163d1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
@@ -31,7 +31,8 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -118,8 +119,8 @@ public class ConnectionlessTest {
         statement.setDate(5,now);
         statement.execute();
         
-        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        Iterator<KeyValue> iterator = dataIterator.next().getSecond().iterator();
+        Iterator<Pair<byte[],List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        Iterator<Cell> iterator = dataIterator.next().getSecond().iterator();
 
         byte[] expectedRowKey1 = saltBuckets == null ? unsaltedRowKey1 : saltedRowKey1;
         byte[] expectedRowKey2 = saltBuckets == null ? unsaltedRowKey2 : saltedRowKey2;
@@ -136,34 +137,34 @@ public class ConnectionlessTest {
         conn.rollback(); // to clear the list of mutations for the next
     }
     
-    private static void assertRow1(Iterator<KeyValue> iterator, byte[] expectedRowKey1) {
-        KeyValue kv;
+    private static void assertRow1(Iterator<Cell> iterator, byte[] expectedRowKey1) {
+        Cell kv;
         assertTrue(iterator.hasNext());
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey1, kv.getRow());        
-        assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv));        
+        assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv)));
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey1, kv.getRow());        
-        assertEquals(name1, PVarchar.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv));        
+        assertEquals(name1, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv)));
         assertTrue(iterator.hasNext());
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey1, kv.getRow());        
-        assertEquals(now, PDate.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv));        
+        assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv)));
     }
 
-    private static void assertRow2(Iterator<KeyValue> iterator, byte[] expectedRowKey2) {
-        KeyValue kv;
+    private static void assertRow2(Iterator<Cell> iterator, byte[] expectedRowKey2) {
+        Cell kv;
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey2, kv.getRow());        
-        assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv));        
+        assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv)));
         assertTrue(iterator.hasNext());
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey2, kv.getRow());        
-        assertEquals(name2, PVarchar.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv));        
+        assertEquals(name2, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv)));
         assertTrue(iterator.hasNext());
         kv = iterator.next();
-        assertArrayEquals(expectedRowKey2, kv.getRow());        
-        assertEquals(now, PDate.INSTANCE.toObject(kv.getValue()));
+        assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv));        
+        assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv)));
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
index 1052184..b9aabfa 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
@@ -33,7 +33,7 @@ import java.util.ListIterator;
 import java.util.NoSuchElementException;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList;
 import org.junit.Test;
@@ -122,7 +122,7 @@ public class EncodedColumnQualifierCellsListTest {
         for (Cell c : cells) {
             assertTrue(list.contains(c));
         }
-        assertFalse(list.contains(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))));
+        assertFalse(list.contains(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))));
     }
     
     @Test
@@ -154,7 +154,7 @@ public class EncodedColumnQualifierCellsListTest {
         assertEquals(5, list.size());
         assertTrue(list.remove(cells[3]));
         assertEquals(4, list.size());
-        assertFalse(list.remove(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))));
+        assertFalse(list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))));
         assertEquals(4, list.size());
     }
     
@@ -165,10 +165,10 @@ public class EncodedColumnQualifierCellsListTest {
         EncodedColumnQualiferCellsList list2 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
         populateList(list2);
         assertTrue(list1.containsAll(list2));
-        list2.remove(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
+        list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
         assertTrue(list1.containsAll(list2));
         assertFalse(list2.containsAll(list1));
-        list2.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)));
+        list2.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)));
         assertFalse(list1.containsAll(list2));
         assertFalse(list2.containsAll(list1));
         List<Cell> arrayList = new ArrayList<>();
@@ -218,7 +218,7 @@ public class EncodedColumnQualifierCellsListTest {
         populateList(list2);
         // retainAll won't be modifying the list1 since they both have the same elements equality wise
         assertFalse(list1.retainAll(list2));
-        list2.remove(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
+        list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
         assertTrue(list1.retainAll(list2));
         assertEquals(list1.size(), list2.size());
         for (Cell c : list1) {
@@ -408,7 +408,7 @@ public class EncodedColumnQualifierCellsListTest {
             i++;
             try {
                 itr.next();
-                list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
+                list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
                 if (i == 2) {
                     fail("ConcurrentModificationException should have been thrown as the list is being modified while being iterated through");
                 }
@@ -426,7 +426,7 @@ public class EncodedColumnQualifierCellsListTest {
         populateList(list);
         ListIterator<Cell> itr = list.listIterator();
         itr.next();
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
         try {
             itr.next();
             fail("ConcurrentModificationException should have been thrown as the list was modified without using iterator");
@@ -440,7 +440,7 @@ public class EncodedColumnQualifierCellsListTest {
         itr.next();
         itr.remove();
         itr.next();
-        list.remove(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
+        list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
         try {
             itr.next();
             fail("ConcurrentModificationException should have been thrown as the list was modified without using iterator");
@@ -451,28 +451,28 @@ public class EncodedColumnQualifierCellsListTest {
     
     private void populateListAndArray(List<Cell> list, Cell[] cells) {
         // add elements in reserved range
-        list.add(cells[0] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
-        list.add(cells[1] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5)));
-        list.add(cells[2] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10)));
+        list.add(cells[0] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
+        list.add(cells[1] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5)));
+        list.add(cells[2] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10)));
 
         // add elements in qualifier range
-        list.add(cells[6] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16)));
-        list.add(cells[4] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
-        list.add(cells[5] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14)));
-        list.add(cells[3] = KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
+        list.add(cells[6] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16)));
+        list.add(cells[4] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
+        list.add(cells[5] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14)));
+        list.add(cells[3] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
     }
 
     private void populateList(List<Cell> list) {
         // add elements in reserved range
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5)));
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10)));
 
         // add elements in qualifier range
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16)));
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14)));
-        list.add(KeyValue.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14)));
+        list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11)));
     }
     
     private class DelegateCell implements Cell {
@@ -499,11 +499,6 @@ public class EncodedColumnQualifierCellsListTest {
         }
 
         @Override
-        public byte[] getValue() {
-            return delegate.getValue();
-        }
-
-        @Override
         public byte getTypeByte() {
             return delegate.getTypeByte();
         }
@@ -539,11 +534,6 @@ public class EncodedColumnQualifierCellsListTest {
         }
 
         @Override
-        public byte[] getRow() {
-            return delegate.getRow();
-        }
-
-        @Override
         public int getQualifierOffset() {
             return delegate.getQualifierOffset();
         }
@@ -559,16 +549,6 @@ public class EncodedColumnQualifierCellsListTest {
         }
 
         @Override
-        public byte[] getQualifier() {
-            return delegate.getQualifier();
-        }
-
-        @Override
-        public long getMvccVersion() {
-            return delegate.getMvccVersion();
-        }
-
-        @Override
         public int getFamilyOffset() {
             return delegate.getFamilyOffset();
         }
@@ -584,11 +564,6 @@ public class EncodedColumnQualifierCellsListTest {
         }
 
         @Override
-        public byte[] getFamily() {
-            return delegate.getFamily();
-        }
-        
-        @Override
         public String toString() {
             return name;
         }
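
The overrides deleted from DelegateCell (getValue(), getRow(), getQualifier(), getFamily(), getMvccVersion()) correspond to methods that no longer exist on the HBase 2.0 Cell interface. Callers can recover equivalent behavior through CellUtil and getSequenceId(); a hedged sketch with illustrative method names:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    public final class RemovedGetterShims {
        private RemovedGetterShims() {}

        static byte[] value(Cell c)     { return CellUtil.cloneValue(c); }     // was c.getValue()
        static byte[] row(Cell c)       { return CellUtil.cloneRow(c); }       // was c.getRow()
        static byte[] qualifier(Cell c) { return CellUtil.cloneQualifier(c); } // was c.getQualifier()
        static byte[] family(Cell c)    { return CellUtil.cloneFamily(c); }    // was c.getFamily()
        static long mvcc(Cell c)        { return c.getSequenceId(); }          // was c.getMvccVersion()
    }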

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
index abe2df0..c200c40 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
@@ -26,7 +26,8 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.schema.SortOrder;
@@ -42,12 +43,12 @@ public class OrderByTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("UPSERT INTO t VALUES ('a')");
         conn.createStatement().execute("UPSERT INTO t VALUES ('ab')");
 
-        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        List<KeyValue> kvs = dataIterator.next().getSecond();
-        Collections.sort(kvs, KeyValue.COMPARATOR);
-        KeyValue first = kvs.get(0);
+        Iterator<Pair<byte[],List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<Cell> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
+        Cell first = kvs.get(0);
         assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1)));
-        KeyValue second = kvs.get(1);
+        Cell second = kvs.get(1);
         assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1)));
     }
 
@@ -58,12 +59,12 @@ public class OrderByTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("UPSERT INTO t VALUES ('a')");
         conn.createStatement().execute("UPSERT INTO t VALUES ('ab')");
 
-        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        List<KeyValue> kvs = dataIterator.next().getSecond();
-        Collections.sort(kvs, KeyValue.COMPARATOR);
-        KeyValue first = kvs.get(0);
+        Iterator<Pair<byte[],List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<Cell> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
+        Cell first = kvs.get(0);
         assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1)));
-        KeyValue second = kvs.get(1);
+        Cell second = kvs.get(1);
         assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1)));
     }
 
@@ -74,12 +75,12 @@ public class OrderByTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("UPSERT INTO t VALUES ('a','x')");
         conn.createStatement().execute("UPSERT INTO t VALUES ('ab', 'x')");
 
-        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        List<KeyValue> kvs = dataIterator.next().getSecond();
-        Collections.sort(kvs, KeyValue.COMPARATOR);
-        KeyValue first = kvs.get(0);
+        Iterator<Pair<byte[],List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<Cell> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
+        Cell first = kvs.get(0);
         assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), 2)));
-        KeyValue second = kvs.get(1);
+        Cell second = kvs.get(1);
         assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), 1)));
     }
 
@@ -89,11 +90,11 @@ public class OrderByTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("CREATE TABLE t (k TIMESTAMP PRIMARY KEY DESC)");
         conn.createStatement().execute("UPSERT INTO t VALUES ('2016-01-04 13:11:51.631')");
 
-        Iterator<Pair<byte[], List<KeyValue>>> dataIterator = PhoenixRuntime
+        Iterator<Pair<byte[], List<Cell>>> dataIterator = PhoenixRuntime
             .getUncommittedDataIterator(conn);
-        List<KeyValue> kvs = dataIterator.next().getSecond();
-        Collections.sort(kvs, KeyValue.COMPARATOR);
-        KeyValue first = kvs.get(0);
+        List<Cell> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
+        Cell first = kvs.get(0);
         long millisDeserialized = PDate.INSTANCE.getCodec().decodeLong(first.getRowArray(),
             first.getRowOffset(), SortOrder.DESC);
         assertEquals(1451913111631L, millisDeserialized);
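
KeyValue.COMPARATOR is gone in HBase 2.0; a List<Cell> is sorted with a CellComparator instead. A minimal sketch of the sort used above (the rows are chosen purely for illustration):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellSortExample {
        public static void main(String[] args) {
            List<Cell> kvs = new ArrayList<>();
            kvs.add(new KeyValue(Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("2")));
            kvs.add(new KeyValue(Bytes.toBytes("a"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("1")));
            Collections.sort(kvs, CellComparatorImpl.COMPARATOR); // replaces KeyValue.COMPARATOR
            System.out.println(Bytes.toString(CellUtil.cloneRow(kvs.get(0)))); // prints "a"
        }
    }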

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java
index a435ba6..9d689f9 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java
@@ -28,7 +28,7 @@ import java.sql.PreparedStatement;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -67,9 +67,9 @@ public class RowKeySchemaTest  extends BaseConnectionlessQueryTest  {
             stmt.setObject(i+1, values[i]);
         }
         stmt.execute();
-            Iterator<Pair<byte[],List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        List<KeyValue> dataKeyValues = iterator.next().getSecond();
-        KeyValue keyValue = dataKeyValues.get(0);
+            Iterator<Pair<byte[],List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<Cell> dataKeyValues = iterator.next().getSecond();
+        Cell keyValue = dataKeyValues.get(0);
         
         List<SortOrder> sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
         for (PColumn col : table.getPKColumns()) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
index 928eb70..dfeaa4c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java
@@ -26,12 +26,14 @@ import java.sql.PreparedStatement;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
@@ -63,9 +65,9 @@ public class RowKeyValueAccessorTest  extends BaseConnectionlessQueryTest  {
             stmt.setObject(i+1, values[i]);
         }
         stmt.execute();
-            Iterator<Pair<byte[],List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
-        List<KeyValue> dataKeyValues = iterator.next().getSecond();
-        KeyValue keyValue = dataKeyValues.get(0);
+            Iterator<Pair<byte[],List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<Cell> dataKeyValues = iterator.next().getSecond();
+        KeyValue keyValue = PhoenixKeyValueUtil.maybeCopyCell(dataKeyValues.get(0));
         
         List<PColumn> pkColumns = table.getPKColumns();
         RowKeyValueAccessor accessor = new RowKeyValueAccessor(pkColumns, 3);
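
PhoenixKeyValueUtil.maybeCopyCell, introduced by this commit as part of renaming org.apache.phoenix.util.KeyValueUtil, bridges Cell-typed results back to a KeyValue where one is still required. A sketch of that bridging idea under the assumption that it copies only when the cell is not already a KeyValue; this is not the actual Phoenix implementation:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    public final class MaybeCopyCellSketch {
        private MaybeCopyCellSketch() {}

        // Return the Cell as a KeyValue, copying only when necessary.
        static KeyValue maybeCopyCell(Cell c) {
            if (c == null) return null;
            if (c instanceof KeyValue) return (KeyValue) c;
            return KeyValueUtil.copyToNewKeyValue(c); // deep copy into a new KeyValue
        }
    }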


[4/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs wherever possible (Rajeshbabu)

Posted by ra...@apache.org.
PHOENIX-4305 Make use of Cell interface APIs wherever possible (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c82cc18d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c82cc18d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c82cc18d

Branch: refs/heads/5.x-HBase-2.0
Commit: c82cc18d8432baba8e2cbd10af121cd39f83ca05
Parents: 0454e42
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Tue Nov 14 10:00:00 2017 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Tue Nov 14 10:00:00 2017 +0530

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |   2 +-
 .../phoenix/end2end/MappingTableDataTypeIT.java |   6 +-
 .../apache/phoenix/end2end/RowTimestampIT.java  |   8 +-
 .../phoenix/end2end/index/DropColumnIT.java     |   6 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java |   4 +-
 .../phoenix/end2end/index/IndexTestUtil.java    |   4 +-
 .../DataTableLocalIndexRegionScanner.java       |   3 +-
 .../regionserver/IndexHalfStoreFileReader.java  |   4 +-
 .../IndexHalfStoreFileReaderGenerator.java      |   5 +-
 .../regionserver/IndexKeyValueSkipListSet.java  |  16 +-
 .../hbase/regionserver/KeyValueSkipListSet.java |  57 ++---
 .../LocalIndexStoreFileScanner.java             |  24 +-
 .../regionserver/wal/IndexedWALEditCodec.java   |   6 +-
 .../phoenix/cache/aggcache/SpillManager.java    |   7 +-
 .../cache/aggcache/SpillableGroupByCache.java   |   4 +-
 .../phoenix/compile/ListJarsQueryPlan.java      |   7 +-
 .../MutatingParallelIteratorFactory.java        |   6 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |  11 +-
 .../GroupedAggregateRegionObserver.java         |  13 +-
 .../coprocessor/MetaDataEndpointImpl.java       | 215 +++++++++-------
 .../coprocessor/SequenceRegionObserver.java     |  46 ++--
 .../UngroupedAggregateRegionObserver.java       |  16 +-
 .../apache/phoenix/execute/MutationState.java   |   9 +-
 .../phoenix/execute/SortMergeJoinPlan.java      |   6 +-
 .../apache/phoenix/execute/TupleProjector.java  |  17 +-
 .../phoenix/filter/DistinctPrefixFilter.java    |   4 +-
 .../hbase/index/builder/BaseIndexBuilder.java   |   4 +-
 .../hbase/index/builder/IndexBuilder.java       |   4 +-
 .../phoenix/hbase/index/covered/Batch.java      |   9 +-
 .../hbase/index/covered/KeyValueStore.java      |   6 +-
 .../hbase/index/covered/LocalTableState.java    |  29 +--
 .../hbase/index/covered/NonTxIndexBuilder.java  |   5 +-
 .../phoenix/hbase/index/covered/TableState.java |   4 +-
 .../hbase/index/covered/data/IndexMemStore.java |  53 ++--
 .../index/covered/data/LocalHBaseState.java     |   4 +-
 .../filter/ApplyAndFilterDeletesFilter.java     |  21 +-
 .../covered/filter/MaxTimestampFilter.java      |  11 +-
 .../index/covered/update/ColumnReference.java   |   3 +-
 .../index/scanner/FilteredKeyValueScanner.java  |   4 +-
 .../index/util/GenericKeyValueBuilder.java      |   7 +-
 .../hbase/index/util/IndexManagementUtil.java   |  10 +-
 .../hbase/index/util/KeyValueBuilder.java       |   6 +-
 .../hbase/index/wal/IndexedKeyValue.java        |  10 +-
 .../phoenix/hbase/index/wal/KeyValueCodec.java  |   2 -
 .../apache/phoenix/index/IndexMaintainer.java   |   9 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |   6 +-
 .../index/PhoenixTransactionalIndexer.java      |   8 +-
 .../BaseGroupedAggregatingResultIterator.java   |   8 +-
 .../GroupedAggregatingResultIterator.java       |   4 +-
 .../iterate/MappedByteBufferSortedQueue.java    |   5 +-
 .../NonAggregateRegionScannerFactory.java       |   4 +-
 .../RowKeyOrderedAggregateResultIterator.java   |   4 +-
 .../UngroupedAggregatingResultIterator.java     |   4 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   7 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  10 +-
 .../mapreduce/FormatToBytesWritableMapper.java  |  24 +-
 .../ImportPreUpsertKeyValueProcessor.java       |   3 +-
 .../mapreduce/MultiHfileOutputFormat.java       |   3 +-
 .../index/PhoenixIndexImportMapper.java         |  12 +-
 .../index/PhoenixIndexPartialBuildMapper.java   |   2 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |   1 -
 .../org/apache/phoenix/schema/Sequence.java     |  92 +++----
 .../stats/DefaultStatisticsCollector.java       |  10 +-
 .../schema/tuple/MultiKeyValueTuple.java        |   4 +-
 .../schema/tuple/PositionBasedResultTuple.java  |  10 +-
 .../phoenix/schema/tuple/ResultTuple.java       |  12 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |  31 +--
 .../org/apache/phoenix/util/KeyValueUtil.java   | 238 ------------------
 .../org/apache/phoenix/util/MetaDataUtil.java   |   2 +-
 .../phoenix/util/PhoenixKeyValueUtil.java       | 245 +++++++++++++++++++
 .../org/apache/phoenix/util/PhoenixRuntime.java |  19 +-
 .../org/apache/phoenix/util/ResultUtil.java     |  12 +-
 .../java/org/apache/phoenix/util/TupleUtil.java |   5 +-
 .../org/apache/phoenix/util/UpgradeUtil.java    |   8 +-
 .../wal/ReadWriteKeyValuesWithCodecTest.java    |   5 +-
 .../phoenix/execute/MutationStateTest.java      |  11 +-
 .../phoenix/execute/UnnestArrayPlanTest.java    |   4 +-
 .../phoenix/filter/SkipScanFilterTest.java      |   9 +-
 .../phoenix/hbase/index/IndexTestingUtils.java  |   7 +-
 .../index/covered/CoveredColumnIndexCodec.java  |  22 +-
 .../index/covered/LocalTableStateTest.java      |   9 +-
 .../covered/TestCoveredColumnIndexCodec.java    |  10 +-
 .../index/covered/data/TestIndexMemStore.java   |   8 +-
 .../filter/TestApplyAndFilterDeletesFilter.java |   5 +-
 .../phoenix/index/IndexMaintainerTest.java      |  19 +-
 .../mapreduce/CsvBulkImportUtilTest.java        |   4 +-
 .../FormatToBytesWritableMapperTest.java        |   3 +-
 .../phoenix/query/ConnectionlessTest.java       |  39 +--
 .../EncodedColumnQualifierCellsListTest.java    |  71 ++----
 .../org/apache/phoenix/query/OrderByTest.java   |  41 ++--
 .../apache/phoenix/schema/RowKeySchemaTest.java |   8 +-
 .../phoenix/schema/RowKeyValueAccessorTest.java |   8 +-
 92 files changed, 885 insertions(+), 898 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index b504acd..9566e48 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -300,7 +300,7 @@ private int getKeyValueCount(Table table) throws IOException {
     ResultScanner results = table.getScanner(scan);
     int count = 0;
     for (Result res : results) {
-      count += res.list().size();
+      count += res.listCells().size();
       LOG.debug(count + ") " + res);
     }
     results.close();
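
Result.list() has been removed; Result.listCells() returns a List<Cell> and can be null for an empty Result, which is worth guarding in counting loops like the one above. A hedged variant:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class CellCounter {
        // Count all cells returned by a full scan of the table.
        static int countCells(Table table) throws IOException {
            int count = 0;
            try (ResultScanner results = table.getScanner(new Scan())) {
                for (Result res : results) {
                    count += res.listCells() == null ? 0 : res.listCells().size();
                }
            }
            return count;
        }
    }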

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index 5173fe4..fb78e1c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -33,9 +33,9 @@ import java.sql.SQLException;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
@@ -95,9 +95,9 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
             ResultScanner results = t.getScanner(scan);
             Result result = results.next();
             assertNotNull("Expected single row", result);
-            List<KeyValue> kvs = result.getColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
+            List<Cell> kvs = result.getColumnCells(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
             assertEquals("Expected single value ", 1, kvs.size());
-            assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValue()));
+            assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValueArray(), kvs.get(0).getValueOffset(), kvs.get(0).getValueLength()));
             assertNull("Expected single row", results.next());
         } finally {
             admin.close();
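
Result.getColumn(family, qualifier) becomes Result.getColumnCells(family, qualifier), and values are read through the array/offset/length triple (or CellUtil.cloneValue). A short sketch of the lookup used above, with illustrative names:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnCellsExample {
        // Read the latest value of cf2:q2 from a Result as a String, or null if absent.
        static String readQ2(Result result) {
            List<Cell> kvs = result.getColumnCells(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
            if (kvs.isEmpty()) return null;
            Cell c = kvs.get(0); // newest version first
            return Bytes.toString(c.getValueArray(), c.getValueOffset(), c.getValueLength());
        }
    }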

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
index 930092d..509e305 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
@@ -149,14 +149,14 @@ public class RowTimestampIT extends ParallelStatsDisabledIT {
         Table hTable = hbaseConn.getTable(TableName.valueOf(tableName));
         ResultScanner resultScanner = hTable.getScanner(scan);
         for (Result result : resultScanner) {
-            long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+            long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
             assertEquals(rowTimestampDate.getTime(), timeStamp);
         }
         if (!mutable) {
             hTable = hbaseConn.getTable(TableName.valueOf(indexName));
              resultScanner = hTable.getScanner(scan);
             for (Result result : resultScanner) {
-                long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+                long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
                 assertEquals(rowTimestampDate.getTime(), timeStamp);
             }
         }
@@ -260,14 +260,14 @@ public class RowTimestampIT extends ParallelStatsDisabledIT {
             Table hTable = hbaseConn.getTable(TableName.valueOf(tableName));
             ResultScanner resultScanner = hTable.getScanner(scan);
             for (Result result : resultScanner) {
-                long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+                long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
                 assertEquals(rowTimestampDate.getTime(), timeStamp);
             }
             if (!mutable) {
                 hTable = hbaseConn.getTable(TableName.valueOf(indexName));
                  resultScanner = hTable.getScanner(scan);
                 for (Result result : resultScanner) {
-                    long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+                    long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
                     assertEquals(rowTimestampDate.getTime(), timeStamp);
                 }
             }
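
Similarly, Result.getColumnLatest(family, qualifier) becomes Result.getColumnLatestCell(family, qualifier), which returns the newest Cell for the column or null. A small sketch of the timestamp check performed above:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Result;

    public class LatestCellExample {
        // Return the timestamp of the newest cell for family:qualifier, or -1 if missing.
        static long latestTimestamp(Result result, byte[] family, byte[] qualifier) {
            Cell latest = result.getColumnLatestCell(family, qualifier);
            return latest == null ? -1L : latest.getTimestamp();
        }
    }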

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
index 766e924..db445f1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
@@ -203,7 +203,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
                 Result result = results.next();
                 assertNotNull(result);
                 
-                assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
+                assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumnCells(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
                 assertNull(results.next());
                 
                 // key value for v2 should have been deleted from the global index table
@@ -213,7 +213,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
                 results = table.getScanner(scan);
                 result = results.next();
                 assertNotNull(result);
-                assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
+                assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(),  result.getColumnCells(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
                 assertNull(results.next());
                 
                 // key value for v2 should have been deleted from the local index table
@@ -225,7 +225,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
                 result = results.next();
                 assertNotNull(result);
                 assertEquals("data table col"
-                        + "umn value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
+                        + "umn value should have been deleted", KeyValue.Type.DeleteColumn.getCode(),  result.getColumnCells(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
                 assertNull(results.next()); 
             }
             else {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index e0398c7..9b06955 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -39,8 +39,8 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -247,7 +247,7 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
     }
 
     private void assertIndexMutations(Connection conn) throws SQLException {
-        Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        Iterator<Pair<byte[], List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
         assertTrue(iterator.hasNext());
         iterator.next();
         assertEquals(!localIndex, iterator.hasNext());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
index 52af966..888ff45 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
@@ -34,6 +34,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -133,8 +134,7 @@ public class IndexTestUtil {
                 for (Map.Entry<byte[],List<Cell>> entry : dataMutation.getFamilyCellMap().entrySet()) {
                     PColumnFamily family = dataTable.getColumnFamily(entry.getKey());
                     for (Cell kv : entry.getValue()) {
-                        @SuppressWarnings("deprecation")
-                        byte[] cq = kv.getQualifier();
+                        byte[] cq = CellUtil.cloneQualifier(kv);
                         byte[] emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(dataTable).getFirst();
                         if (Bytes.compareTo(emptyKVQualifier, cq) != 0) {
                             try {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
index eee6c93..859b9ba 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
@@ -27,7 +27,6 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -124,7 +123,7 @@ public class DataTableLocalIndexRegionScanner extends DelegateRegionScanner {
                             del = new Delete(CellUtil.cloneRow(cell));
                             mutationList.add(del);
                         }
-                        del.addDeleteMarker(cell);
+                        del.add(cell);
                     }
                 }
             }
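
Delete.addDeleteMarker(Cell) gives way to Delete.add(Cell), which appends an existing delete-marker cell to the mutation and is declared to throw IOException, so callers must handle it. A hedged sketch of the loop body above:

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Delete;

    public class DeleteMarkerExample {
        // Build a Delete for the cell's row and carry the original delete marker along.
        static Delete toDelete(Cell deleteMarker) throws IOException {
            Delete del = new Delete(CellUtil.cloneRow(deleteMarker));
            del.add(deleteMarker); // replaces the deprecated addDeleteMarker(cell)
            return del;
        }
    }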

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..4b6b7e2 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
@@ -84,7 +84,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
         this.top = Reference.isTopFileRegion(r.getFileRegion());
-        this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
+        this.splitRow = CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
         this.regionInfo = regionInfo;
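
KeyValue.createKeyValueFromKey(byte[]) likewise moves to KeyValueUtil.createKeyValueFromKey(byte[]), which rebuilds a KeyValue from a serialized key (no value part) so the split row can be cloned out of it. A minimal sketch:

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    public class SplitRowExample {
        // Recover the row bytes from a serialized KeyValue key, e.g. a store file split key.
        static byte[] splitRow(byte[] splitkey) {
            KeyValue kv = KeyValueUtil.createKeyValueFromKey(splitkey);
            return CellUtil.cloneRow(kv);
        }
    }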

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index a50d5ce..6e0bbcb 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
@@ -119,7 +120,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                                 region.getRegionInfo().getRegionName());
                     if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
                     byte[] splitRow =
-                            CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
+                            CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(r.getSplitKey()));
                     // We need not change any thing in first region data because first region start key
                     // is equal to merged region start key. So returning same reader.
                     if (Bytes.compareTo(mergeRegions.getFirst().getStartKey(), splitRow) == 0) {
@@ -138,7 +139,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         childRegion = mergeRegions.getSecond();
                         regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                     }
-                    splitKey = KeyValue.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
+                    splitKey = KeyValueUtil.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
                         new byte[region.getRegionInfo().getEndKey().length] :
                             region.getRegionInfo().getStartKey()).getKey();
                 } else {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
index c322cb4..0d2de89 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.Comparator;
 import java.util.concurrent.ConcurrentSkipListMap;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 
 /**
  * Like a {@link KeyValueSkipListSet}, but also exposes useful, atomic methods (e.g.
@@ -30,7 +30,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
 
   // this is annoying that we need to keep this extra pointer around here, but its pretty minimal
   // and means we don't need to change the HBase code.
-  private ConcurrentSkipListMap<KeyValue, KeyValue> delegate;
+  private ConcurrentSkipListMap<Cell, Cell> delegate;
 
   /**
    * Create a new {@link IndexKeyValueSkipListSet} based on the passed comparator.
@@ -38,9 +38,9 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
    *          well as object equality in the map.
    * @return a map that uses the passed comparator
    */
-  public static IndexKeyValueSkipListSet create(Comparator<KeyValue> comparator) {
-    ConcurrentSkipListMap<KeyValue, KeyValue> delegate =
-        new ConcurrentSkipListMap<KeyValue, KeyValue>(comparator);
+  public static IndexKeyValueSkipListSet create(CellComparator comparator) {
+    ConcurrentSkipListMap<Cell, Cell> delegate =
+        new ConcurrentSkipListMap<Cell, Cell>(comparator);
     IndexKeyValueSkipListSet ret = new IndexKeyValueSkipListSet(delegate);
     return ret;
   }
@@ -48,7 +48,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
   /**
    * @param delegate map to which to delegate all calls
    */
-  public IndexKeyValueSkipListSet(ConcurrentSkipListMap<KeyValue, KeyValue> delegate) {
+  public IndexKeyValueSkipListSet(ConcurrentSkipListMap<Cell, Cell> delegate) {
     super(delegate);
     this.delegate = delegate;
   }
@@ -70,7 +70,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
    *           the map
    * @throws NullPointerException if the specified key is null
    */
-  public KeyValue putIfAbsent(KeyValue kv) {
+  public Cell putIfAbsent(Cell kv) {
     return this.delegate.putIfAbsent(kv, kv);
   }
 }
\ No newline at end of file

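With IndexKeyValueSkipListSet widened to Cell, create() now takes HBase 2.0's
CellComparator. A sketch of the intended usage against the patched class
(CellComparatorImpl.COMPARATOR is the same constant this commit uses later in
MetaDataEndpointImpl):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.regionserver.IndexKeyValueSkipListSet;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutIfAbsentSketch {
        public static void main(String[] args) {
            IndexKeyValueSkipListSet set =
                    IndexKeyValueSkipListSet.create(CellComparatorImpl.COMPARATOR);
            Cell kv = KeyValueUtil.createFirstOnRow(Bytes.toBytes("row1"));
            // First insert wins and returns null; a second attempt returns the
            // Cell already stored, without replacing it.
            System.out.println(set.putIfAbsent(kv));       // null
            System.out.println(set.putIfAbsent(kv) == kv); // true
        }
    }
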
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
index 211aa10..b68abd9 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
@@ -18,7 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 
 import java.util.Collection;
@@ -43,96 +44,96 @@ import java.util.concurrent.ConcurrentSkipListMap;
  * has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent
  * get and set and won't throw ConcurrentModificationException when iterating.
  */
-public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
-  private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+public class KeyValueSkipListSet implements NavigableSet<Cell> {
+  private final ConcurrentNavigableMap<Cell, Cell> delegatee;
 
-  KeyValueSkipListSet(final KeyValue.KVComparator c) {
-    this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
+  KeyValueSkipListSet(final CellComparator c) {
+    this.delegatee = new ConcurrentSkipListMap<Cell, Cell>(c);
   }
 
-  KeyValueSkipListSet(final ConcurrentNavigableMap<KeyValue, KeyValue> m) {
+  KeyValueSkipListSet(final ConcurrentNavigableMap<Cell, Cell> m) {
     this.delegatee = m;
   }
 
-  public KeyValue ceiling(KeyValue e) {
+  public Cell ceiling(Cell e) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public Iterator<KeyValue> descendingIterator() {
+  public Iterator<Cell> descendingIterator() {
     return this.delegatee.descendingMap().values().iterator();
   }
 
-  public NavigableSet<KeyValue> descendingSet() {
+  public NavigableSet<Cell> descendingSet() {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public KeyValue floor(KeyValue e) {
+  public Cell floor(Cell e) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public SortedSet<KeyValue> headSet(final KeyValue toElement) {
+  public SortedSet<Cell> headSet(final Cell toElement) {
     return headSet(toElement, false);
   }
 
-  public NavigableSet<KeyValue> headSet(final KeyValue toElement,
+  public NavigableSet<Cell> headSet(final Cell toElement,
       boolean inclusive) {
     return new KeyValueSkipListSet(this.delegatee.headMap(toElement, inclusive));
   }
 
-  public KeyValue higher(KeyValue e) {
+  public Cell higher(Cell e) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public Iterator<KeyValue> iterator() {
+  public Iterator<Cell> iterator() {
     return this.delegatee.values().iterator();
   }
 
-  public KeyValue lower(KeyValue e) {
+  public Cell lower(Cell e) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public KeyValue pollFirst() {
+  public Cell pollFirst() {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public KeyValue pollLast() {
+  public Cell pollLast() {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public SortedSet<KeyValue> subSet(KeyValue fromElement, KeyValue toElement) {
+  public SortedSet<Cell> subSet(Cell fromElement, Cell toElement) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public NavigableSet<KeyValue> subSet(KeyValue fromElement,
-      boolean fromInclusive, KeyValue toElement, boolean toInclusive) {
+  public NavigableSet<Cell> subSet(Cell fromElement,
+      boolean fromInclusive, Cell toElement, boolean toInclusive) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public SortedSet<KeyValue> tailSet(KeyValue fromElement) {
+  public SortedSet<Cell> tailSet(Cell fromElement) {
     return tailSet(fromElement, true);
   }
 
-  public NavigableSet<KeyValue> tailSet(KeyValue fromElement, boolean inclusive) {
+  public NavigableSet<Cell> tailSet(Cell fromElement, boolean inclusive) {
     return new KeyValueSkipListSet(this.delegatee.tailMap(fromElement, inclusive));
   }
 
-  public Comparator<? super KeyValue> comparator() {
+  public Comparator<? super Cell> comparator() {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public KeyValue first() {
+  public Cell first() {
     return this.delegatee.get(this.delegatee.firstKey());
   }
 
-  public KeyValue last() {
+  public Cell last() {
     return this.delegatee.get(this.delegatee.lastKey());
   }
 
-  public boolean add(KeyValue e) {
+  public boolean add(Cell e) {
     return this.delegatee.put(e, e) == null;
   }
 
-  public boolean addAll(Collection<? extends KeyValue> c) {
+  public boolean addAll(Collection<? extends Cell> c) {
     throw new UnsupportedOperationException("Not implemented");
   }
 
@@ -165,7 +166,7 @@ public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
     throw new UnsupportedOperationException("Not implemented");
   }
 
-  public KeyValue get(KeyValue kv) {
+  public Cell get(Cell kv) {
     return this.delegatee.get(kv);
   }
 

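The set above is the usual NavigableSet-over-ConcurrentSkipListMap trick: each
Cell is stored as both key and value, so first()/last() can hand back the
stored instance, and iteration tolerates concurrent mutation without
ConcurrentModificationException. A standalone sketch of the delegate pattern
with plain HBase types:

    import java.util.concurrent.ConcurrentSkipListMap;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MapBackedSetSketch {
        public static void main(String[] args) {
            // The Cell is both key and value in the delegate map.
            ConcurrentSkipListMap<Cell, Cell> delegate =
                    new ConcurrentSkipListMap<>(CellComparatorImpl.COMPARATOR);
            Cell a = KeyValueUtil.createFirstOnRow(Bytes.toBytes("a"));
            Cell b = KeyValueUtil.createFirstOnRow(Bytes.toBytes("b"));
            boolean added = delegate.put(a, a) == null;     // add()
            delegate.put(b, b);
            Cell first = delegate.get(delegate.firstKey()); // first()
            System.out.println(added + " " + (first == a)); // true true
        }
    }
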
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
index 3a80698..3b36a7e 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
@@ -21,16 +21,14 @@ import java.io.IOException;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 import static org.apache.hadoop.hbase.KeyValue.ROW_LENGTH_SIZE;
 
@@ -71,7 +69,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
         return peek;
     }
 
-    private KeyValue getChangedKey(Cell next, boolean changeBottomKeys) {
+    private Cell getChangedKey(Cell next, boolean changeBottomKeys) {
         // If it is a top store file, replace the region start key with the split key in the cell's key
         // and produce the new value corresponding to the changed key
         byte[] changedKey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys);
@@ -114,7 +112,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
 
     @Override
     public boolean seekToPreviousRow(Cell key) throws IOException {
-        KeyValue kv = KeyValueUtil.ensureKeyValue(key);
+        KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
         if (reader.isTop()) {
             byte[] fk = reader.getFirstKey();
             // This will be null when the file is empty in which we can not seekBefore to
@@ -122,10 +120,10 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
             if (fk == null) {
                 return false;
             }
-            if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), fk, 0, fk.length) <= 0) {
+            if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), fk, 0, fk.length) <= 0) {
                 return super.seekToPreviousRow(key);
             }
-            KeyValue replacedKey = getKeyPresentInHFiles(kv.getBuffer());
+            KeyValue replacedKey = getKeyPresentInHFiles(kv.getRowArray());
             boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey);
             while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
                 seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -134,7 +132,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
         } else {
             // The equals sign isn't strictly necessary; it's just here to be
             // consistent with seekTo
-            if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+            if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
                 boolean seekToPreviousRow = super.seekToPreviousRow(kv);
                 while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
                     seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -221,24 +219,24 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
      * @throws IOException
      */
     public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
-        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+        KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
         KeyValue keyToSeek = kv;
         if (reader.isTop()) {
-            if(getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) < 0){
+            if(getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) < 0){
                 if(!isSeek && realSeekDone()) {
                     return true;
                 }
                 return seekOrReseekToProperKey(isSeek, keyToSeek);
             }
-            keyToSeek = getKeyPresentInHFiles(kv.getBuffer());
+            keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
             return seekOrReseekToProperKey(isSeek, keyToSeek);
         } else {
-            if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+            if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
                 close();
                 return false;
             }
             if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
-                keyToSeek = getKeyPresentInHFiles(kv.getBuffer());
+                keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
             }
         }
         return seekOrReseekToProperKey(isSeek, keyToSeek);

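PhoenixKeyValueUtil.maybeCopyCell replaces the removed KeyValueUtil.ensureKeyValue
throughout this scanner; a true KeyValue is still needed because the code reads
the flat key via getKeyOffset()/getKeyLength(). A hypothetical sketch of what
such a helper reduces to (the real body lives in
org.apache.phoenix.util.PhoenixKeyValueUtil; this is only an assumption about
its shape):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    public class MaybeCopyCellSketch {
        // Hypothetical stand-in for PhoenixKeyValueUtil.maybeCopyCell: reuse
        // the Cell when it already is a KeyValue, otherwise copy it into one
        // so the flat-key accessors are available.
        static KeyValue maybeCopyCell(Cell c) {
            if (c == null) return null;
            if (c instanceof KeyValue) return (KeyValue) c;
            return KeyValueUtil.copyToNewKeyValue(c);
        }
    }
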
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
index 80745a8..ebd212e 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
@@ -30,13 +30,13 @@ import java.io.OutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.codec.BaseDecoder;
 import org.apache.hadoop.hbase.codec.BaseEncoder;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
 import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 
 /**
@@ -215,7 +215,7 @@ public class IndexedWALEditCodec extends WALCellCodec {
       checkFlushed();
 
       // use the standard encoding mechanism
-      KeyValueCodec.write(this.dataOutput, KeyValueUtil.ensureKeyValue(cell));
+      KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
     }
   }
 
@@ -255,7 +255,7 @@ public class IndexedWALEditCodec extends WALCellCodec {
         this.compressedKvEncoder.write(cell);
       }
       else{
-        KeyValueCodec.write(this.dataOutput, KeyValueUtil.ensureKeyValue(cell));
+        KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
index 6d89c99..6e3956f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
@@ -29,6 +29,7 @@ import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.WritableUtils;
@@ -43,7 +44,7 @@ import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.Closeables;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.TupleUtil;
 
 import com.google.common.base.Preconditions;
@@ -201,8 +202,8 @@ public class SpillManager implements Closeable {
             input.skip(keyLength);
             int valueLength = WritableUtils.readVInt(input);
             int vIntValLength = WritableUtils.getVIntSize(keyLength);
-            KeyValue keyValue =
-                    KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
+            Cell keyValue =
+                    PhoenixKeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
                         QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN,
                         QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
             Tuple result = new SingleKeyValueTuple(keyValue);

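The SpillManager hunk rebuilds the aggregate Cell from a spilled record whose
key and value are each prefixed with a Hadoop vint length. A sketch of just
that framing arithmetic, with the Phoenix-specific constants stubbed out
(record layout assumed as described; note the two vint prefixes can have
different sizes):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    public class SpillRecordSketch {
        public static void main(String[] args) throws IOException {
            // Write one <vint keyLen><key><vint valLen><value> record.
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bos);
            byte[] key = "groupKey".getBytes();
            byte[] val = "aggBytes".getBytes();
            WritableUtils.writeVInt(out, key.length);
            out.write(key);
            WritableUtils.writeVInt(out, val.length);
            out.write(val);
            byte[] data = bos.toByteArray();

            // Recompute the value offset the way the hunk above does:
            // vIntKeyLength + keyLength + vIntValLength.
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
            int keyLength = WritableUtils.readVInt(in);
            int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
            in.skipBytes(keyLength);
            int valueLength = WritableUtils.readVInt(in);
            int vIntValLength = WritableUtils.getVIntSize(valueLength);
            int valueOffset = vIntKeyLength + keyLength + vIntValLength;
            System.out.println(new String(data, valueOffset, valueLength)); // aggBytes
        }
    }
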
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index dc0ae21..69d5144 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -51,7 +51,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.memory.InsufficientMemoryException;
 import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
 import org.apache.phoenix.util.Closeables;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -364,7 +364,7 @@ public class SpillableGroupByCache implements GroupByCache {
                             + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
                             + aggs.toString() + " value = " + Bytes.toStringBinary(value));
                 }
-                results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
+                results.add(PhoenixKeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
                         SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
                 return cacheIter.hasNext();
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
index 839e7c9..5c20f4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
@@ -31,9 +31,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -67,6 +65,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.SizedUtil;
 
 public class ListJarsQueryPlan implements QueryPlan {
@@ -163,9 +162,9 @@ public class ListJarsQueryPlan implements QueryPlan {
                     expression.evaluate(null, ptr);
                     byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
                     Cell cell =
-                            CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
+                            PhoenixKeyValueUtil.newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
                                 HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(),
-                                Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
+                                HConstants.EMPTY_BYTE_ARRAY);
                     List<Cell> cells = new ArrayList<Cell>(1);
                     cells.add(cell);
                     return new ResultTuple(Result.create(cells));

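CellUtil.createCell is gone from the public HBase 2.0 surface, so the plan now
routes its single-cell virtual rows through PhoenixKeyValueUtil.newKeyValue;
functionally this is a Put-typed KeyValue with empty family, qualifier and
value. A sketch with raw HBase types (the row key is illustrative only):

    import java.util.Collections;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VirtualRowSketch {
        public static void main(String[] args) {
            byte[] rowKey = Bytes.toBytes("jar:/path/to/udf.jar"); // illustrative
            // Only the row key matters: the plan surfaces one result row
            // whose remaining components are empty.
            Cell cell = new KeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
                    HConstants.EMPTY_BYTE_ARRAY, System.currentTimeMillis(),
                    KeyValue.Type.Put, HConstants.EMPTY_BYTE_ARRAY);
            Result r = Result.create(Collections.singletonList(cell));
            System.out.println(Bytes.toString(r.getRow()));
        }
    }
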
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
index 8e63fa9..343c8f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
@@ -25,7 +25,7 @@ import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY;
 import java.sql.SQLException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.iterate.ParallelIteratorFactory;
@@ -35,7 +35,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PLong;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Factory class used to instantiate an iterator to handle mutations made during a parallel scan.
@@ -66,7 +66,7 @@ public abstract class MutatingParallelIteratorFactory implements ParallelIterato
         final MutationState finalState = state;
         
         byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
-        KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
+        Cell keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
         final Tuple tuple = new SingleKeyValueTuple(keyValue);
         return new PeekingResultIterator() {
             private boolean done = false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index 62e6991..4e5ef84 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -25,9 +25,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -64,6 +62,7 @@ import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.SizedUtil;
 
 public class TraceQueryPlan implements QueryPlan {
@@ -167,9 +166,11 @@ public class TraceQueryPlan implements QueryPlan {
                 expression.evaluate(null, ptr);
                 byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
                 Cell cell =
-                        CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
-                            HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(),
-                            Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
+                        PhoenixKeyValueUtil
+                                .newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
+                                    HConstants.EMPTY_BYTE_ARRAY,
+                                    EnvironmentEdgeManager.currentTimeMillis(),
+                                    HConstants.EMPTY_BYTE_ARRAY);
                 List<Cell> cells = new ArrayList<Cell>(1);
                 cells.add(cell);
                 return new ResultTuple(Result.create(cells));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 67cc114..7c4d06d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -39,7 +39,6 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -71,7 +70,7 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SizedUtil;
@@ -297,7 +296,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
             chunk.resize(estSize);
 
-            final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
+            final List<Cell> aggResults = new ArrayList<Cell>(aggregateMap.size());
 
             final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
                     aggregateMap.entrySet().iterator();
@@ -314,8 +313,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                             + " with aggregators " + Arrays.asList(rowAggregators).toString()
                             + " value = " + Bytes.toStringBinary(value), customAnnotations));
                 }
-                KeyValue keyValue =
-                        KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
+                Cell keyValue =
+                        PhoenixKeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                             SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0,
                             value.length);
                 aggResults.add(keyValue);
@@ -522,8 +521,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
 
                 if (currentKey != null) {
                     byte[] value = aggregators.toBytes(rowAggregators);
-                    KeyValue keyValue =
-                            KeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
+                    Cell keyValue =
+                            PhoenixKeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
                                 currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
                                 AGG_TIMESTAMP, value, 0, value.length);
                     results.add(keyValue);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 5dbf765..a87e961 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -98,6 +98,11 @@ import java.util.NavigableMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.CellBuilder.DataType;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -105,6 +110,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -225,7 +231,7 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -266,39 +272,38 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     // Column to track tables that have been upgraded based on PHOENIX-2067
     public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
     public static final byte[] ROW_KEY_ORDER_OPTIMIZABLE_BYTES = Bytes.toBytes(ROW_KEY_ORDER_OPTIMIZABLE);
-
     // KeyValues for Table
-    private static final KeyValue TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
-    private static final KeyValue TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
-    private static final KeyValue COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
-    private static final KeyValue SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
-    private static final KeyValue PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
-    private static final KeyValue DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
-    private static final KeyValue INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
-    private static final KeyValue IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
-    private static final KeyValue VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
-    private static final KeyValue DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
-    private static final KeyValue DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
-    private static final KeyValue MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
-    private static final KeyValue VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
-    private static final KeyValue VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
-    private static final KeyValue INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
-    private static final KeyValue INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
-    private static final KeyValue STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
-    private static final KeyValue EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
-    private static final KeyValue BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
-    private static final KeyValue ROW_KEY_ORDER_OPTIMIZABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
-    private static final KeyValue TRANSACTIONAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES);
-    private static final KeyValue UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES);
-    private static final KeyValue IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY,
+    private static final Cell TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
+    private static final Cell TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+    private static final Cell COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
+    private static final Cell SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
+    private static final Cell PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
+    private static final Cell DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
+    private static final Cell INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
+    private static final Cell IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
+    private static final Cell VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
+    private static final Cell DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
+    private static final Cell DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
+    private static final Cell MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
+    private static final Cell VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
+    private static final Cell VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
+    private static final Cell INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
+    private static final Cell INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
+    private static final Cell STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
+    private static final Cell EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
+    private static final Cell BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
+    private static final Cell ROW_KEY_ORDER_OPTIMIZABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
+    private static final Cell TRANSACTIONAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES);
+    private static final Cell UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES);
+    private static final Cell IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY,
             TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
-    private static final KeyValue AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES);
-    private static final KeyValue APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES);
-    private static final KeyValue STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES);
-    private static final KeyValue ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES);
-    private static final KeyValue USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES);
+    private static final Cell AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES);
+    private static final Cell APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES);
+    private static final Cell STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES);
+    private static final Cell ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES);
+    private static final Cell USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES);
     
-    private static final List<KeyValue> TABLE_KV_COLUMNS = Arrays.<KeyValue>asList(
+    private static final List<Cell> TABLE_KV_COLUMNS = Arrays.<Cell>asList(
             EMPTY_KEYVALUE_KV,
             TABLE_TYPE_KV,
             TABLE_SEQ_NUM_KV,
@@ -329,7 +334,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             USE_STATS_FOR_PARALLELIZATION_KV
             );
     static {
-        Collections.sort(TABLE_KV_COLUMNS, KeyValue.COMPARATOR);
+        Collections.sort(TABLE_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
     }
 
     private static final int TABLE_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_TYPE_KV);
@@ -361,20 +366,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final int USE_STATS_FOR_PARALLELIZATION_INDEX = TABLE_KV_COLUMNS.indexOf(USE_STATS_FOR_PARALLELIZATION_KV);
 
     // KeyValues for Column
-    private static final KeyValue DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
-    private static final KeyValue COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
-    private static final KeyValue NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
-    private static final KeyValue DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
-    private static final KeyValue ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
-    private static final KeyValue SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
-    private static final KeyValue ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
-    private static final KeyValue VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
-    private static final KeyValue IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
-    private static final KeyValue COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
-    private static final KeyValue IS_ROW_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES);
-    private static final KeyValue COLUMN_QUALIFIER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES);
-
-    private static final List<KeyValue> COLUMN_KV_COLUMNS = Arrays.<KeyValue>asList(
+    private static final Cell DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
+    private static final Cell COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
+    private static final Cell NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
+    private static final Cell DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
+    private static final Cell ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
+    private static final Cell SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
+    private static final Cell ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
+    private static final Cell VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
+    private static final Cell IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
+    private static final Cell COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
+    private static final Cell IS_ROW_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES);
+    private static final Cell COLUMN_QUALIFIER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES);
+
+    private static final List<Cell> COLUMN_KV_COLUMNS = Arrays.<Cell>asList(
             DECIMAL_DIGITS_KV,
             COLUMN_SIZE_KV,
             NULLABLE_KV,
@@ -390,9 +395,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             COLUMN_QUALIFIER_KV
             );
     static {
-        Collections.sort(COLUMN_KV_COLUMNS, KeyValue.COMPARATOR);
+        Collections.sort(COLUMN_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
     }
-    private static final KeyValue QUALIFIER_COUNTER_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_COUNTER_BYTES);
+    private static final Cell QUALIFIER_COUNTER_KV = KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_COUNTER_BYTES);
     private static final int DECIMAL_DIGITS_INDEX = COLUMN_KV_COLUMNS.indexOf(DECIMAL_DIGITS_KV);
     private static final int COLUMN_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_SIZE_KV);
     private static final int NULLABLE_INDEX = COLUMN_KV_COLUMNS.indexOf(NULLABLE_KV);
@@ -408,18 +413,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     
     private static final int LINK_TYPE_INDEX = 0;
 
-    private static final KeyValue CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
-    private static final KeyValue JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
-    private static final KeyValue RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
-    private static final KeyValue NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
-    private static final KeyValue TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
-    private static final KeyValue IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
-    private static final KeyValue DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
-    private static final KeyValue MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
-    private static final KeyValue MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
-    private static final KeyValue IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
-
-    private static final List<KeyValue> FUNCTION_KV_COLUMNS = Arrays.<KeyValue>asList(
+    private static final Cell CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
+    private static final Cell JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
+    private static final Cell RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
+    private static final Cell NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
+    private static final Cell TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
+    private static final Cell IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
+    private static final Cell DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
+    private static final Cell MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
+    private static final Cell MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
+    private static final Cell IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
+
+    private static final List<Cell> FUNCTION_KV_COLUMNS = Arrays.<Cell>asList(
         EMPTY_KEYVALUE_KV,
         CLASS_NAME_KV,
         JAR_PATH_KV,
@@ -427,7 +432,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         NUM_ARGS_KV
         );
     static {
-        Collections.sort(FUNCTION_KV_COLUMNS, KeyValue.COMPARATOR);
+        Collections.sort(FUNCTION_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
     }
     
     private static final int CLASS_NAME_INDEX = FUNCTION_KV_COLUMNS.indexOf(CLASS_NAME_KV);
@@ -435,7 +440,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private static final int RETURN_TYPE_INDEX = FUNCTION_KV_COLUMNS.indexOf(RETURN_TYPE_KV);
     private static final int NUM_ARGS_INDEX = FUNCTION_KV_COLUMNS.indexOf(NUM_ARGS_KV);
 
-    private static final List<KeyValue> FUNCTION_ARG_KV_COLUMNS = Arrays.<KeyValue>asList(
+    private static final List<Cell> FUNCTION_ARG_KV_COLUMNS = Arrays.<Cell>asList(
         TYPE_KV,
         IS_ARRAY_KV,
         IS_CONSTANT_KV,
@@ -444,7 +449,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         MAX_VALUE_KV
         );
     static {
-        Collections.sort(FUNCTION_ARG_KV_COLUMNS, KeyValue.COMPARATOR);
+        Collections.sort(FUNCTION_ARG_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
     }
     
     private static final int IS_ARRAY_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_ARRAY_KV);
@@ -718,7 +723,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             PInteger.INSTANCE.getCodec().decodeInt(arraySizeKv.getValueArray(), arraySizeKv.getValueOffset(), SortOrder.getDefault());
 
         Cell viewConstantKv = colKeyValues[VIEW_CONSTANT_INDEX];
-        byte[] viewConstant = viewConstantKv == null ? null : viewConstantKv.getValue();
+        byte[] viewConstant =
+                viewConstantKv == null ? null : new ImmutableBytesPtr(
+                        viewConstantKv.getValueArray(), viewConstantKv.getValueOffset(),
+                        viewConstantKv.getValueLength()).copyBytesIfNecessary();
         Cell isViewReferencedKv = colKeyValues[IS_VIEW_REFERENCED_INDEX];
         boolean isViewReferenced = isViewReferencedKv != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isViewReferencedKv.getValueArray(), isViewReferencedKv.getValueOffset(), isViewReferencedKv.getValueLength()));
         Cell columnDefKv = colKeyValues[COLUMN_DEF_INDEX];
@@ -1360,6 +1368,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     public void createTable(RpcController controller, CreateTableRequest request,
             RpcCallback<MetaDataResponse> done) {
         MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
+        CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
         byte[][] rowKeyMetaData = new byte[3][];
         byte[] schemaName = null;
         byte[] tableName = null;
@@ -1544,10 +1553,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     else { 
                         viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
                     }
-                    Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES,
-                        cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
-                    cells.add(viewStatementCell);
-                    
+                    cellBuilder
+                            .clear()
+                            .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+                            .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+                                cell.getFamilyLength()).setQualifier(VIEW_STATEMENT_BYTES)
+                            .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+                            .setValue(viewStatement);
+                    cells.add(cellBuilder.build());
                     // set the IS_VIEW_REFERENCED column of the auto partition column row
                     Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
                     familyCellMap = autoPartitionPut.getFamilyCellMap();
@@ -1557,9 +1570,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                     byte[] bytes = new byte [dataType.getByteSize() + 1];
                     dataType.toBytes(val, bytes, 0);
-                    Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES,
-                        cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
-                    cells.add(viewConstantCell);
+                    cellBuilder
+                            .clear()
+                            .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+                            .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+                                cell.getFamilyLength()).setQualifier(VIEW_CONSTANT_BYTES)
+                            .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+                            .setValue(bytes);
+                    cells.add(cellBuilder.build());
                 }
                 Short indexId = null;
                 if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
@@ -1600,9 +1618,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         Object val = dataType.toObject(seqValue, PLong.INSTANCE);
                         byte[] bytes = new byte [dataType.getByteSize() + 1];
                         dataType.toBytes(val, bytes, 0);
-                        Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES,
-                            cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
-                        cells.add(indexIdCell);
+                        cellBuilder
+                                .clear()
+                                .setRow(cell.getRowArray(), cell.getRowOffset(),
+                                    cell.getRowLength())
+                                .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+                                    cell.getFamilyLength()).setQualifier(VIEW_INDEX_ID_BYTES)
+                                .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+                                .setValue(bytes);
+                        cells.add(cellBuilder.build());
                         indexId = (short) seqValue;
                     }
                 }
@@ -1845,9 +1869,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             List<RowLock> locks = Lists.newArrayList();
 
             try {
-                acquireLock(region, lockKey, locks);
+                ServerUtil.acquireLock(region, lockKey, locks);
                 if (key != lockKey) {
-                    acquireLock(region, key, locks);
+                    ServerUtil.acquireLock(region, key, locks);
                 }
                 List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
                 result =
@@ -2303,6 +2327,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             Region region, List<RowLock> locks, int clientVersion) throws IOException, SQLException {
         List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
         Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
+        CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
         // Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
         for (Mutation m : tableMetadata) {
             if (m instanceof Put) {
@@ -2324,9 +2349,21 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
                             if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())==0
                                     && tableProp.isValidOnView() && tableProp.isMutable()) {
-                                Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell),
-                                    CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
-                                    CellUtil.cloneValue(cell));
+                                Cell tablePropCell =
+                                        cellBuilder
+                                                .clear()
+                                                .setRow(cell.getRowArray(), cell.getRowOffset(),
+                                                    cell.getRowLength())
+                                                .setFamily(cell.getFamilyArray(),
+                                                    cell.getFamilyOffset(), cell.getFamilyLength())
+                                                .setQualifier(cell.getQualifierArray(),
+                                                    cell.getQualifierOffset(),
+                                                    cell.getQualifierLength())
+                                                .setTimestamp(cell.getTimestamp())
+                                                .setType(DataType.Put)
+                                                .setValue(cell.getValueArray(),
+                                                    cell.getValueOffset(), cell.getValueLength())
+                                                .build();
                                 tablePropertyCellMap.put(tableProp, tablePropCell);
                             }
                         }
@@ -2432,9 +2469,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     // The column doesn't exist in the view.
                     Put viewColumnPut = new Put(columnKey, clientTimeStamp);
                     for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
-                        viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell),
-                                CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
-                                CellUtil.cloneValue(cell)));
+                        Cell newCell =
+                                cellBuilder
+                                        .clear()
+                                        .setRow(columnKey)
+                                        .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+                                            cell.getFamilyLength())
+                                        .setQualifier(cell.getQualifierArray(),
+                                            cell.getQualifierOffset(), cell.getQualifierLength())
+                                        .setTimestamp(cell.getTimestamp())
+                                        .setType(DataType.Put)
+                                        .setValue(cell.getValueArray(), cell.getValueOffset(),
+                                            cell.getValueLength()).build();
+                        viewColumnPut.add(newCell);
                     }
                     if (isDivergedView(view)) {
                         if (isPkCol) {
@@ -3556,7 +3603,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 }
                 if ((currentState == PIndexState.ACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.UNUSABLE) {
                     newState = PIndexState.INACTIVE;
-                    newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
+                    newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
                         INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
                 } else if ((currentState == PIndexState.INACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.USABLE) {
                     // Don't allow manual state change to USABLE (i.e. ACTIVE) if non zero INDEX_DISABLE_TIMESTAMP
@@ -3565,7 +3612,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     } else {
                         newState = PIndexState.ACTIVE;
                     }
-                    newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
+                    newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
                         INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
                 }
 
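The builder chain above recurs throughout this class; shown once as a standalone helper for readers following the API change. This is a minimal sketch, not part of the patch: the helper name is illustrative, and DataType.Put mirrors the hunks above (later HBase 2.0 APIs expose the same value as Cell.Type.Put).

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilder;
    import org.apache.hadoop.hbase.CellBuilder.DataType;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;

    // Deep-copy a cell as a Put, detaching it from the source buffers so it
    // stays valid after the scanner or mutation that produced it is recycled.
    static Cell deepCopyAsPut(Cell cell) {
        CellBuilder builder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
        return builder.clear()
                .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
                .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())
                .setTimestamp(cell.getTimestamp())
                .setType(DataType.Put)
                .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
                .build();
    }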


[3/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface APIs where ever possible.(Rajeshbabu)

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
index c004818..68b36f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
@@ -52,7 +52,7 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.SequenceUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -84,7 +84,7 @@ public class SequenceRegionObserver implements RegionObserver {
         byte[] errorCodeBuf = new byte[PInteger.INSTANCE.getByteSize()];
         PInteger.INSTANCE.getCodec().encodeInt(errorCode, errorCodeBuf, 0);
         return  Result.create(Collections.singletonList(
-                (Cell)KeyValueUtil.newKeyValue(row, 
+                PhoenixKeyValueUtil.newKeyValue(row, 
                         PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, 
                         QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf)));
     }
@@ -139,9 +139,9 @@ public class SequenceRegionObserver implements RegionObserver {
                 }
                 
                  
-                KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
-                KeyValue incrementByKV = Sequence.getIncrementByKV(result);
-                KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
+                Cell currentValueKV = Sequence.getCurrentValueKV(result);
+                Cell incrementByKV = Sequence.getIncrementByKV(result);
+                Cell cacheSizeKV = Sequence.getCacheSizeKV(result);
                 
                 long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
                 long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault());
@@ -161,15 +161,15 @@ public class SequenceRegionObserver implements RegionObserver {
                 	currentValue += incrementBy * cacheSize;
                     // Hold timestamp constant for sequences, so that clients always only see the latest value
                     // regardless of when they connect.
-                    KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
+                    Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
                     put.add(newCurrentValueKV);
                     Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
                 }
                 else {
-	                KeyValue cycleKV = Sequence.getCycleKV(result);
-	                KeyValue limitReachedKV = Sequence.getLimitReachedKV(result);
-	                KeyValue minValueKV = Sequence.getMinValueKV(result);
-	                KeyValue maxValueKV = Sequence.getMaxValueKV(result);
+	                Cell cycleKV = Sequence.getCycleKV(result);
+	                Cell limitReachedKV = Sequence.getLimitReachedKV(result);
+	                Cell minValueKV = Sequence.getMinValueKV(result);
+	                Cell maxValueKV = Sequence.getMaxValueKV(result);
 	                
 	                boolean increasingSeq = incrementBy > 0 ? true : false;
 	                
@@ -179,7 +179,7 @@ public class SequenceRegionObserver implements RegionObserver {
 	                boolean limitReached;
 	                if (limitReachedKV == null) {
 	                	limitReached = false;
-	                	KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
+	                	Cell newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
 	                	put.add(newLimitReachedKV);
 	                	Sequence.replaceLimitReachedKV(cells, newLimitReachedKV);
 	                }
@@ -190,7 +190,7 @@ public class SequenceRegionObserver implements RegionObserver {
 	                long minValue;
 	                if (minValueKV == null) {
 	                    minValue = Long.MIN_VALUE;
-	                    KeyValue newMinValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp);
+	                    Cell newMinValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp);
 	                    put.add(newMinValueKV);
 	                    Sequence.replaceMinValueKV(cells, newMinValueKV);
 	                }
@@ -201,7 +201,7 @@ public class SequenceRegionObserver implements RegionObserver {
 	                long maxValue;
 	                if (maxValueKV == null) {
 	                    maxValue = Long.MAX_VALUE;
-	                    KeyValue newMaxValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp);
+	                    Cell newMaxValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp);
 	                    put.add(newMaxValueKV);
 	                    Sequence.replaceMaxValueKV(cells, newMaxValueKV);
 	                }
@@ -212,7 +212,7 @@ public class SequenceRegionObserver implements RegionObserver {
 	                boolean cycle;
 	                if (cycleKV == null) {
 	                    cycle = false;
-	                    KeyValue newCycleKV = createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp);
+	                    Cell newCycleKV = createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp);
 	                    put.add(newCycleKV);
 	                    Sequence.replaceCycleValueKV(cells, newCycleKV);
 	                }
@@ -260,11 +260,11 @@ public class SequenceRegionObserver implements RegionObserver {
                     // update currentValue
 					currentValue += incrementBy * (SequenceUtil.isBulkAllocation(numSlotsToAllocate) ? numSlotsToAllocate : cacheSize);
 					// update the currentValue of the Result row
-					KeyValue newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
+					Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp);
 		            Sequence.replaceCurrentValueKV(cells, newCurrentValueKV);
 		            put.add(newCurrentValueKV);
 					// set the LIMIT_REACHED column to true, so that no new values can be used
-					KeyValue newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
+					Cell newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp);
 		            put.add(newLimitReachedKV);
                 }
                 // update the KeyValues on the server
@@ -293,10 +293,10 @@ public class SequenceRegionObserver implements RegionObserver {
 	 *            column qualifier of KeyValue
 	 * @return return the KeyValue that was created
 	 */
-	KeyValue createKeyValue(byte[] key, byte[] cqBytes, long value, long timestamp) {
+	Cell createKeyValue(byte[] key, byte[] cqBytes, long value, long timestamp) {
 		byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()];
-    PLong.INSTANCE.getCodec().encodeLong(value, valueBuffer, 0);
-		return KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, timestamp, valueBuffer);
+		PLong.INSTANCE.getCodec().encodeLong(value, valueBuffer, 0);
+		return PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, timestamp, valueBuffer);
 	}
     
 	/**
@@ -308,9 +308,9 @@ public class SequenceRegionObserver implements RegionObserver {
 	 *            column qualifier of KeyValue
 	 * @return return the KeyValue that was created
 	 */
-	private KeyValue createKeyValue(byte[] key, byte[] cqBytes, boolean value, long timestamp) throws IOException {
+	private Cell createKeyValue(byte[] key, byte[] cqBytes, boolean value, long timestamp) throws IOException {
 		// create new key value for put
-		return KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, 
+		return PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, 
 				timestamp, value ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES);
 	}
 
@@ -397,7 +397,7 @@ public class SequenceRegionObserver implements RegionObserver {
                     // Timestamp should match exactly, or we may have the wrong sequence
                     if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) {
                         return Result.create(Collections.singletonList(
-                          (Cell)KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, 
+                          (Cell)PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, 
                             QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), ByteUtil.EMPTY_BYTE_ARRAY)));
                     }
                     m = new Put(row, currentValueKV.getTimestamp());
@@ -425,7 +425,7 @@ public class SequenceRegionObserver implements RegionObserver {
                 // the client cares about is the timestamp, which is the timestamp of
                 // when the mutation was actually performed (useful in the case of .
                 return Result.create(Collections.singletonList(
-                  (Cell)KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE)));
+                  (Cell)PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE)));
             } finally {
                 ServerUtil.releaseRowLocks(locks);
             }
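
The timestamp is pinned on these cells so every client reads the latest sequence state regardless of when it connects. A minimal round-trip sketch of how such a cell is encoded and read back, using only calls that appear in the hunks above (the method name is illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
    import org.apache.phoenix.schema.SortOrder;
    import org.apache.phoenix.schema.types.PLong;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;

    static long encodeAndDecodeCurrentValue(byte[] row, long value, long timestamp) {
        // Encode the long into a fixed-width buffer, as createKeyValue does.
        byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()];
        PLong.INSTANCE.getCodec().encodeLong(value, valueBuffer, 0);
        Cell kv = PhoenixKeyValueUtil.newKeyValue(row,
                PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
                PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, valueBuffer);
        // Decode it again, exactly as the observer reads CURRENT_VALUE.
        return PLong.INSTANCE.getCodec().decodeLong(
                kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
    }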

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index ab6309c..82bfc07 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -131,7 +131,7 @@ import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -332,7 +332,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         public boolean add(Mutation e) {
             boolean r = super.add(e);
             if (r) {
-                this.byteSize += KeyValueUtil.calculateMutationDiskSize(e);
+                this.byteSize += PhoenixKeyValueUtil.calculateMutationDiskSize(e);
             }
             return r;
         }
@@ -797,12 +797,12 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         }
 
         final boolean hadAny = hasAny;
-        KeyValue keyValue = null;
+        Cell keyValue = null;
         if (hadAny) {
             byte[] value = aggregators.toBytes(rowAggregators);
-            keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
+            keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
         }
-        final KeyValue aggKeyValue = keyValue;
+        final Cell aggKeyValue = keyValue;
 
         RegionScanner scanner = new BaseRegionScanner(innerScanner) {
             private boolean done = !hadAny;
@@ -1096,7 +1096,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             region.closeRegionOperation();
         }
         byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
-        final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
+        final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                 SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
 
         RegionScanner scanner = new BaseRegionScanner(innerScanner) {
@@ -1154,8 +1154,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                     + region.getRegionInfo().getRegionNameAsString());
         }
         byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
-        final KeyValue aggKeyValue =
-                KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
+        final Cell aggKeyValue =
+                PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                     SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
         RegionScanner scanner = new BaseRegionScanner(innerScanner) {
             @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index b0974c6..04ed864 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -90,7 +90,7 @@ import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SQLCloseable;
@@ -355,7 +355,7 @@ public class MutationState implements SQLCloseable {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED).build()
                     .buildException();
         }
-        long estimatedSize = KeyValueUtil.getEstimatedRowSize(mutations);
+        long estimatedSize = PhoenixKeyValueUtil.getEstimatedRowSize(mutations);
         if (estimatedSize > maxSizeBytes) {
             resetState();
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED)
@@ -746,7 +746,7 @@ public class MutationState implements SQLCloseable {
         long byteSize = 0;
         if (GlobalClientMetrics.isMetricsEnabled()) {
             for (Mutation mutation : mutations) {
-                byteSize += KeyValueUtil.calculateMutationDiskSize(mutation);
+                byteSize += PhoenixKeyValueUtil.calculateMutationDiskSize(mutation);
             }
         }
         GLOBAL_MUTATION_BYTES.update(byteSize);
@@ -891,7 +891,6 @@ public class MutationState implements SQLCloseable {
 
     }
     
-    @SuppressWarnings("deprecation")
     private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
         int i = 0;
         long[] serverTimeStamps = null;
@@ -1085,7 +1084,7 @@ public class MutationState implements SQLCloseable {
         List<Mutation> currentList = Lists.newArrayList();
         long currentBatchSizeBytes = 0L;
         for (Mutation mutation : allMutationList) {
-            long mutationSizeBytes = KeyValueUtil.calculateMutationDiskSize(mutation);
+            long mutationSizeBytes = PhoenixKeyValueUtil.calculateMutationDiskSize(mutation);
             if (currentList.size() == batchSize || currentBatchSizeBytes + mutationSizeBytes > batchSizeBytes) {
                 if (currentList.size() > 0) {
                     mutationBatchList.add(currentList);
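
A standalone sketch of the size-bounded batching loop above, assuming only PhoenixKeyValueUtil.calculateMutationDiskSize from this patch (the helper name is illustrative). Note that a single mutation larger than batchSizeBytes still gets a batch of its own:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;
    import com.google.common.collect.Lists;

    static List<List<Mutation>> batchBySize(List<Mutation> all, int batchSize, long batchSizeBytes) {
        List<List<Mutation>> batches = Lists.newArrayList();
        List<Mutation> current = Lists.newArrayList();
        long currentBytes = 0L;
        for (Mutation m : all) {
            long mBytes = PhoenixKeyValueUtil.calculateMutationDiskSize(m);
            // Close the batch once either the row-count or the byte cap is hit.
            if (current.size() == batchSize || currentBytes + mBytes > batchSizeBytes) {
                if (!current.isEmpty()) {
                    batches.add(current);
                    current = Lists.newArrayList();
                    currentBytes = 0L;
                }
            }
            current.add(m);
            currentBytes += mBytes;
        }
        if (!current.isEmpty()) {
            batches.add(current);
        }
        return batches;
    }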

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index fab7c59..395a699 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -33,7 +33,6 @@ import java.util.Queue;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -67,6 +66,7 @@ import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.ResultUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -688,14 +688,14 @@ public class SortMergeJoinPlan implements QueryPlan {
 
             @Override
             protected int sizeOf(Tuple e) {
-                KeyValue kv = KeyValueUtil.ensureKeyValue(e.getValue(0));
+                KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0));
                 return Bytes.SIZEOF_INT * 2 + kv.getLength();
             }
 
             @SuppressWarnings("deprecation")
             @Override
             protected void writeToBuffer(MappedByteBuffer buffer, Tuple e) {
-                KeyValue kv = KeyValueUtil.ensureKeyValue(e.getValue(0));
+                KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0));
                 buffer.putInt(kv.getLength() + Bytes.SIZEOF_INT);
                 buffer.putInt(kv.getLength());
                 buffer.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
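
maybeCopyCell converts to a KeyValue only when the Cell is not one already, which keeps the contiguous getBuffer()/getOffset()/getLength() write below cheap in the common case. A minimal sketch of the spill-record format used above:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;

    @SuppressWarnings("deprecation") // getBuffer() is deprecated but still backs this format
    static void writeSpillRecord(ByteBuffer buffer, Cell cell) {
        KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
        buffer.putInt(kv.getLength() + Bytes.SIZEOF_INT); // total record length
        buffer.putInt(kv.getLength());                    // serialized KeyValue length
        buffer.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
    }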

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
index 266bb6e..753c11d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
@@ -30,7 +30,6 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -48,7 +47,7 @@ import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.BaseTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.base.Preconditions;
@@ -171,7 +170,7 @@ public class TupleProjector {
         long timestamp;
         ImmutableBytesWritable projectedValue = new ImmutableBytesWritable();
         int bitSetLen;
-        KeyValue keyValue;
+        Cell keyValue;
 
         public ProjectedValueTuple(Tuple keyBase, long timestamp, byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) {
             keyBase.getKey(this.keyPtr);
@@ -209,7 +208,7 @@ public class TupleProjector {
         }
 
         @Override
-        public KeyValue getValue(int index) {
+        public Cell getValue(int index) {
             if (index != 0) {
                 throw new IndexOutOfBoundsException(Integer.toString(index));
             }
@@ -217,9 +216,9 @@ public class TupleProjector {
         }
 
         @Override
-        public KeyValue getValue(byte[] family, byte[] qualifier) {
+        public Cell getValue(byte[] family, byte[] qualifier) {
             if (keyValue == null) {
-                keyValue = KeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), 
+                keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), 
                         VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength());
             }
             return keyValue;
@@ -256,15 +255,15 @@ public class TupleProjector {
         }
 
         @Override
-        public KeyValue getValue(int index) {
+        public Cell getValue(int index) {
             if (index != 0) { throw new IndexOutOfBoundsException(Integer.toString(index)); }
             return getValue(VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER);
         }
 
         @Override
-        public KeyValue getValue(byte[] family, byte[] qualifier) {
+        public Cell getValue(byte[] family, byte[] qualifier) {
             if (keyValue == null) {
-                keyValue = KeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(),
+                keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(),
                         VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER, timestamp, projectedValue.get(),
                         projectedValue.getOffset(), projectedValue.getLength());
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
index 1280cb5..e30a6eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
@@ -22,7 +22,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -113,7 +113,7 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
                 }
             }
         }
-        return KeyValue.createFirstOnRow(tmp.get(), tmp.getOffset(), tmp.getLength(), null, 0, 0,
+        return KeyValueUtil.createFirstOnRow(tmp.get(), tmp.getOffset(), tmp.getLength(), null, 0, 0,
                 null, 0, 0);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
index a2edd45..ee7f074 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java
@@ -17,7 +17,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -115,7 +115,7 @@ public abstract class BaseIndexBuilder implements IndexBuilder {
     }
 
     @Override
-    public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(Collection<KeyValue> filtered, IndexMetaData context)
+    public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(Collection<Cell> filtered, IndexMetaData context)
             throws IOException {
         throw new UnsupportedOperationException();
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
index a00294c..489c40e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java
@@ -22,7 +22,7 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Increment;
@@ -96,7 +96,7 @@ public interface IndexBuilder extends Stoppable {
      * @throws IOException on failure
      */
   public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
-      Collection<KeyValue> filtered, IndexMetaData context)
+      Collection<Cell> filtered, IndexMetaData context)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java
index e707ea2..722d64c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.hbase.index.covered;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 
 /**
@@ -29,7 +30,7 @@ public class Batch {
 
   private static final long pointDeleteCode = KeyValue.Type.Delete.getCode();
   private final long timestamp;
-  private List<KeyValue> batch = new ArrayList<KeyValue>();
+  private List<Cell> batch = new ArrayList<Cell>();
   private boolean allPointDeletes = true;
 
   /**
@@ -39,8 +40,8 @@ public class Batch {
     this.timestamp = ts;
   }
 
-  public void add(KeyValue kv){
-    if (pointDeleteCode != kv.getType()) {
+  public void add(Cell kv){
+    if (pointDeleteCode != kv.getTypeByte()) {
       allPointDeletes = false;
     }
     batch.add(kv);
@@ -54,7 +55,7 @@ public class Batch {
     return this.timestamp;
   }
 
-  public List<KeyValue> getKvs() {
+  public List<Cell> getKvs() {
     return this.batch;
   }
 }
\ No newline at end of file
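
With Batch holding Cells, the point-delete test reads the serialized type byte directly instead of going through KeyValue.getType(). The comparison in isolation (helper name illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;

    // A Cell exposes its type as the raw serialized byte; comparing against the
    // KeyValue.Type code avoids materializing a KeyValue just to inspect it.
    static boolean isPointDelete(Cell kv) {
        return kv.getTypeByte() == KeyValue.Type.Delete.getCode();
    }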

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java
index 30d2904..0848e29 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java
@@ -17,7 +17,7 @@
  */
 package org.apache.phoenix.hbase.index.covered;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
 
 /**
@@ -25,9 +25,9 @@ import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
  */
 public interface KeyValueStore {
 
-  public void add(KeyValue kv, boolean overwrite);
+  public void add(Cell kv, boolean overwrite);
 
   public ReseekableScanner getScanner();
 
-  public void rollback(KeyValue kv);
+  public void rollback(Cell kv);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
index f7784e5..f89a896 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
@@ -20,7 +20,6 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Pair;
@@ -51,7 +50,7 @@ public class LocalTableState implements TableState {
     private Mutation update;
     private Set<ColumnTracker> trackedColumns = new HashSet<ColumnTracker>();
     private ScannerBuilder scannerBuilder;
-    private List<KeyValue> kvs = new ArrayList<KeyValue>();
+    private List<Cell> kvs = new ArrayList<Cell>();
     private List<? extends IndexedColumnGroup> hints;
     private CoveredColumns columnSet;
 
@@ -64,24 +63,24 @@ public class LocalTableState implements TableState {
         this.columnSet = new CoveredColumns();
     }
 
-    public void addPendingUpdates(KeyValue... kvs) {
+    public void addPendingUpdates(Cell... kvs) {
         if (kvs == null) return;
         addPendingUpdates(Arrays.asList(kvs));
     }
 
-    public void addPendingUpdates(List<KeyValue> kvs) {
+    public void addPendingUpdates(List<Cell> kvs) {
         if (kvs == null) return;
         setPendingUpdates(kvs);
         addUpdate(kvs);
     }
 
-    private void addUpdate(List<KeyValue> list) {
+    private void addUpdate(List<Cell> list) {
         addUpdate(list, true);
     }
 
-    private void addUpdate(List<KeyValue> list, boolean overwrite) {
+    private void addUpdate(List<Cell> list, boolean overwrite) {
         if (list == null) return;
-        for (KeyValue kv : list) {
+        for (Cell kv : list) {
             this.memstore.add(kv, overwrite);
         }
     }
@@ -90,20 +89,10 @@ public class LocalTableState implements TableState {
         if (list == null) return;
         // Avoid a copy of the Cell into a KeyValue if it's already a KeyValue
         for (Cell c : list) {
-            this.memstore.add(maybeCopyCell(c), overwrite);
+            this.memstore.add(c, overwrite);
         }
     }
 
-    private KeyValue maybeCopyCell(Cell c) {
-        // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something
-        // that will likely be removed at some point in time.
-        if (c == null) return null;
-        if (c instanceof KeyValue) {
-            return (KeyValue) c;
-        }
-        return KeyValueUtil.copyToNewKeyValue(c);
-    }
-
     @Override
     public RegionCoprocessorEnvironment getEnvironment() {
         return this.env;
@@ -240,7 +229,7 @@ public class LocalTableState implements TableState {
     }
 
     @Override
-    public Collection<KeyValue> getPendingUpdate() {
+    public Collection<Cell> getPendingUpdate() {
         return this.kvs;
     }
 
@@ -251,7 +240,7 @@ public class LocalTableState implements TableState {
      * @param update
      *            pending {@link KeyValue}s
      */
-    public void setPendingUpdates(Collection<KeyValue> update) {
+    public void setPendingUpdates(Collection<Cell> update) {
         this.kvs.clear();
         this.kvs.addAll(update);
     }
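
The private copy helper deleted above survives centrally as PhoenixKeyValueUtil.maybeCopyCell, used throughout this patch. A minimal sketch of that copy-on-demand contract, assuming the utility keeps the removed method's semantics:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    // Copy only when forced to: a Cell that is already a KeyValue passes
    // through untouched, so hot paths stay allocation-free.
    static KeyValue maybeCopyCell(Cell c) {
        if (c == null) return null;
        if (c instanceof KeyValue) return (KeyValue) c;
        return KeyValueUtil.copyToNewKeyValue(c);
    }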

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 8dd57c0..4adc7b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -85,8 +85,7 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
         long ts = m.getFamilyCellMap().values().iterator().next().iterator().next().getTimestamp();
         Batch batch = new Batch(ts);
         for (List<Cell> family : m.getFamilyCellMap().values()) {
-            List<KeyValue> kvs = KeyValueUtil.ensureKeyValues(family);
-            for (KeyValue kv : kvs) {
+            for (Cell kv : family) {
                 batch.add(kv);
                 if(ts != kv.getTimestamp()) {
                     throw new IllegalStateException("Time stamps must match for all cells in a batch");
@@ -256,7 +255,7 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
     }
 
     @Override
-    public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(Collection<KeyValue> filtered, IndexMetaData indexMetaData)
+    public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(Collection<Cell> filtered, IndexMetaData indexMetaData)
             throws IOException {
         // TODO Implement IndexBuilder.getIndexUpdateForFilteredRows
         return null;
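
The batch construction above assumes every cell of a mutation carries one timestamp. A usage sketch that builds a Batch from a Mutation and trips the same guard otherwise (helper name illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.phoenix.hbase.index.covered.Batch;

    static Batch toSingleBatch(Mutation m) {
        // Take the timestamp of the first cell as the batch timestamp.
        long ts = m.getFamilyCellMap().values().iterator().next().iterator().next().getTimestamp();
        Batch batch = new Batch(ts);
        for (List<Cell> family : m.getFamilyCellMap().values()) {
            for (Cell kv : family) {
                if (ts != kv.getTimestamp()) {
                    throw new IllegalStateException("Time stamps must match for all cells in a batch");
                }
                batch.add(kv);
            }
        }
        return batch;
    }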

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
index f85de59..f520673 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
@@ -23,7 +23,7 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Pair;
@@ -84,5 +84,5 @@ public interface TableState {
    * Can be used to help the codec to determine which columns it should attempt to index.
    * @return the keyvalues in the pending update to the table.
    */
-  Collection<KeyValue> getPendingUpdate();
+  Collection<Cell> getPendingUpdate();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index 0fc9e14..dfd3774 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -17,16 +17,15 @@
  */
 package org.apache.phoenix.hbase.index.covered.data;
 
-import java.util.Comparator;
 import java.util.Iterator;
 import java.util.SortedSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.IndexKeyValueSkipListSet;
 import org.apache.hadoop.hbase.regionserver.MemStore;
@@ -34,6 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Like the HBase {@link MemStore}, but without all that extra work around maintaining snapshots and
@@ -76,27 +76,10 @@ public class IndexMemStore implements KeyValueStore {
 
   private static final Log LOG = LogFactory.getLog(IndexMemStore.class);
   private IndexKeyValueSkipListSet kvset;
-  private Comparator<KeyValue> comparator;
-
-  /**
-   * Compare two {@link KeyValue}s based only on their row keys. Similar to the standard
-   * {@link KeyValue#COMPARATOR}, but doesn't take into consideration the memstore timestamps. We
-   * instead manage which KeyValue to retain based on how its loaded here
-   */
-  public static final Comparator<KeyValue> COMPARATOR = new Comparator<KeyValue>() {
-
-    private final KVComparator rawcomparator = new KVComparator();
-
-    @Override
-    public int compare(final KeyValue left, final KeyValue right) {
-      return rawcomparator.compareFlatKey(left.getRowArray(), left.getOffset() + KeyValue.ROW_OFFSET,
-        left.getKeyLength(), right.getRowArray(), right.getOffset() + KeyValue.ROW_OFFSET,
-        right.getKeyLength());
-    }
-  };
+  private CellComparator comparator;
 
   public IndexMemStore() {
-    this(COMPARATOR);
+    this(CellComparatorImpl.COMPARATOR);
   }
 
   /**
@@ -106,13 +89,13 @@ public class IndexMemStore implements KeyValueStore {
    * Exposed for subclassing/testing.
    * @param comparator to use
    */
-  IndexMemStore(Comparator<KeyValue> comparator) {
+  IndexMemStore(CellComparator comparator) {
     this.comparator = comparator;
     this.kvset = IndexKeyValueSkipListSet.create(comparator);
   }
 
   @Override
-  public void add(KeyValue kv, boolean overwrite) {
+  public void add(Cell kv, boolean overwrite) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Inserting: " + toString(kv));
     }
@@ -131,19 +114,19 @@ public class IndexMemStore implements KeyValueStore {
 
   private void dump() {
     LOG.trace("Current kv state:\n");
-    for (KeyValue kv : this.kvset) {
+    for (Cell kv : this.kvset) {
       LOG.trace("KV: " + toString(kv));
     }
     LOG.trace("========== END MemStore Dump ==================\n");
   }
 
-  private String toString(KeyValue kv) {
+  private String toString(Cell kv) {
     return kv.toString() + "/value=" + 
         Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
   }
 
   @Override
-  public void rollback(KeyValue kv) {
+  public void rollback(Cell kv) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Rolling back: " + toString(kv));
     }
@@ -169,13 +152,13 @@ public class IndexMemStore implements KeyValueStore {
   // set, rather than a primary and a secondary set of KeyValues.
   protected class MemStoreScanner implements ReseekableScanner {
     // Next row information for the set
-    private KeyValue nextRow = null;
+    private Cell nextRow = null;
 
     // last iterated KVs for kvset and snapshot (to restore iterator state after reseek)
-    private KeyValue kvsetItRow = null;
+    private Cell kvsetItRow = null;
 
     // iterator based scanning.
-    private Iterator<KeyValue> kvsetIt;
+    private Iterator<Cell> kvsetIt;
 
     // The kvset at the time of creating this scanner
     volatile IndexKeyValueSkipListSet kvsetAtCreation;
@@ -185,12 +168,12 @@ public class IndexMemStore implements KeyValueStore {
       kvsetAtCreation = kvset;
     }
 
-    private KeyValue getNext(Iterator<KeyValue> it) {
+    private Cell getNext(Iterator<Cell> it) {
       // in the original implementation we cared about the current thread's readpoint from MVCC.
       // However, we don't need to worry here because everything the index can see, is also visible
       // to the client (or is the pending primary table update, so it will be once the index is
       // written, so it might as well be).
-      KeyValue v = null;
+      Cell v = null;
       try {
         while (it.hasNext()) {
           v = it.next();
@@ -220,7 +203,7 @@ public class IndexMemStore implements KeyValueStore {
 
       // kvset and snapshot will never be null.
       // if tailSet can't find anything, SortedSet is empty (not null).
-      kvsetIt = kvsetAtCreation.tailSet(KeyValueUtil.ensureKeyValue(key)).iterator();
+      kvsetIt = kvsetAtCreation.tailSet(PhoenixKeyValueUtil.maybeCopyCell(key)).iterator();
       kvsetItRow = null;
 
       return seekInSubLists();
@@ -250,7 +233,7 @@ public class IndexMemStore implements KeyValueStore {
        * Unfortunately the Java API does not offer a method to get it. So we remember the last keys
        * we iterated to and restore the reseeked set to at least that point.
        */
-      kvsetIt = kvsetAtCreation.tailSet(getHighest(KeyValueUtil.ensureKeyValue(key), kvsetItRow)).iterator();
+      kvsetIt = kvsetAtCreation.tailSet(getHighest(PhoenixKeyValueUtil.maybeCopyCell(key), kvsetItRow)).iterator();
       return seekInSubLists();
     }
 
@@ -258,7 +241,7 @@ public class IndexMemStore implements KeyValueStore {
      * Returns the higher of the two key values, or null if they are both null. This uses
      * comparator.compare() to compare the KeyValue using the memstore comparator.
      */
-    private KeyValue getHighest(KeyValue first, KeyValue second) {
+    private Cell getHighest(Cell first, Cell second) {
       if (first == null && second == null) {
         return null;
       }
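
CellComparatorImpl.COMPARATOR orders cells by row, then family, qualifier, timestamp and type, matching what the removed flat-key comparator did without consulting memstore sequence ids. A minimal sketch of the comparison (values illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparator;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    static void compareDemo() {
        CellComparator comparator = CellComparatorImpl.COMPARATOR;
        Cell a = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
        Cell b = new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"), Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
        // Negative result: "row1" sorts before "row2"; ties would fall through
        // to family, qualifier, then descending timestamp.
        assert comparator.compare(a, b) < 0;
    }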

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java
index 9968627..5b06910 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java
@@ -20,7 +20,7 @@ package org.apache.phoenix.hbase.index.covered.data;
 import java.io.IOException;
 import java.util.Collection;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -40,7 +40,7 @@ public interface LocalHBaseState {
    * @return the full state of the given row. Includes all current versions (even if they are not
    *         usually visible to the client (unless they are also doing a raw scan)). Never returns a
    *         <tt>null</tt> {@link Result} - instead, when there is not data for the row, returns a
-   *         {@link Result} with no stored {@link KeyValue}s.
+   *         {@link Result} with no stored {@link Cell}s.
    * @throws IOException if there is an issue reading the row
    */
   public Result getCurrentRowState(Mutation m, Collection<? extends ColumnReference> toCover, boolean ignoreNewerMutations)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
index a1f01ed..67049f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
@@ -27,10 +27,10 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Only allow the 'latest' timestamp of each family:qualifier pair, ensuring that they aren't
@@ -101,7 +101,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
   
   @Override
   public Cell getNextCellHint(Cell peeked){
-    return currentHint.getHint(KeyValueUtil.ensureKeyValue(peeked));
+    return currentHint.getHint(PhoenixKeyValueUtil.maybeCopyCell(peeked));
   }
 
   @Override
@@ -111,7 +111,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
       return ReturnCode.SKIP;
     }
 
-    KeyValue nextKV = KeyValueUtil.ensureKeyValue(next);
+    KeyValue nextKV = PhoenixKeyValueUtil.maybeCopyCell(next);
     switch (KeyValue.Type.codeToType(next.getTypeByte())) {
     /*
      * DeleteFamily will always sort first because those KVs (we assume) don't have qualifiers (or
@@ -170,7 +170,7 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
    * Get the next hint for a given peeked keyvalue
    */
   interface Hinter {
-    public abstract KeyValue getHint(KeyValue peek);
+    public abstract Cell getHint(Cell peek);
   }
 
   /**
@@ -181,10 +181,10 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
   class DeleteFamilyHinter implements Hinter {
 
     @Override
-    public KeyValue getHint(KeyValue peeked) {
+    public Cell getHint(Cell peeked) {
       // check to see if we have another column to seek
       ImmutableBytesPtr nextFamily =
-          getNextFamily(new ImmutableBytesPtr(peeked.getBuffer(), peeked.getFamilyOffset(),
+          getNextFamily(new ImmutableBytesPtr(peeked.getFamilyArray(), peeked.getFamilyOffset(),
               peeked.getFamilyLength()));
       if (nextFamily == null) {
         // no known next family, so we can be completely done
@@ -192,8 +192,9 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
         return KeyValue.LOWESTKEY;
       }
         // there is a valid family, so we should seek to that
-      return KeyValue.createFirstOnRow(peeked.getRow(), nextFamily.copyBytesIfNecessary(),
-        HConstants.EMPTY_BYTE_ARRAY);
+      return org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(peeked.getRowArray(),
+          peeked.getRowOffset(), peeked.getRowLength(), nextFamily.get(),
+          nextFamily.getOffset(), nextFamily.getLength(), HConstants.EMPTY_BYTE_ARRAY, 0, 0);
     }
 
   }
@@ -205,8 +206,8 @@ public class ApplyAndFilterDeletesFilter extends FilterBase {
   class DeleteColumnHinter implements Hinter {
 
     @Override
-    public KeyValue getHint(KeyValue kv) {
-      return KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
+    public Cell getHint(Cell kv) {
+      return org.apache.hadoop.hbase.KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
         kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(),
         kv.getQualifierOffset(), kv.getQualifierLength());
     }
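
Both hinters now build their seek targets through the org.apache.hadoop.hbase.KeyValueUtil factories, which take array/offset/length triplets and so work against any Cell. The two hint shapes in isolation (helper names illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeyValueUtil;

    // After a DeleteFamily: jump to the first possible cell of the next
    // known family on the same row.
    static Cell firstOnFamily(Cell peeked, byte[] nextFamily) {
        return KeyValueUtil.createFirstOnRow(peeked.getRowArray(), peeked.getRowOffset(),
            peeked.getRowLength(), nextFamily, 0, nextFamily.length,
            HConstants.EMPTY_BYTE_ARRAY, 0, 0);
    }

    // After a DeleteColumn: jump past every remaining version of the column.
    static Cell lastOnColumn(Cell kv) {
        return KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
            kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
            kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
    }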

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
index a8c7474..ec21946 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java
@@ -19,9 +19,9 @@ package org.apache.phoenix.hbase.index.covered.filter;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Inclusive filter on the maximum timestamp allowed. Excludes all elements greater than (but not
@@ -42,17 +42,12 @@ public class MaxTimestampFilter extends FilterBase {
     // with other filters too much.
     KeyValue kv = null;
     try {
-        kv = KeyValueUtil.ensureKeyValue(currentKV).clone();
+        kv = PhoenixKeyValueUtil.maybeCopyCell(currentKV).clone();
     } catch (CloneNotSupportedException e) {
         // the exception should not happen at all
         throw new IllegalArgumentException(e);
     }
-    int offset =kv.getTimestampOffset();
-    //set the timestamp in the buffer
-    byte[] buffer = kv.getBuffer();
-    byte[] ts = Bytes.toBytes(this.ts);
-    System.arraycopy(ts, 0, buffer, offset, ts.length);
-
+    kv.setTimestamp(ts);
     return kv;
   }
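
Replacing the manual timestamp splice with KeyValue.setTimestamp(long) is the substance of this hunk. A minimal sketch mirroring the calls above, assuming maybeCopyCell may hand back the original instance when it is already a KeyValue (hence the clone before mutating):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;

    static Cell capTimestamp(Cell currentKV, long maxTs) throws CloneNotSupportedException {
        // Clone so a cell possibly shared with other filters stays untouched,
        // then rewrite only the timestamp in the copy's backing buffer.
        KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(currentKV).clone();
        kv.setTimestamp(maxTs);
        return kv;
    }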
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
index 00348b3..5aa1037 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.hbase.index.covered.update;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -139,7 +140,7 @@ public class ColumnReference implements Comparable<ColumnReference> {
     }
 
     public KeyValue getFirstKeyValueForRow(byte[] row) {
-        return KeyValue.createFirstOnRow(row, getFamily(), getQualifier() == ALL_QUALIFIERS ? null
+        return KeyValueUtil.createFirstOnRow(row, getFamily(), getQualifier() == ALL_QUALIFIERS ? null
                 : getQualifier());
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index 072b624..6359d6c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -22,10 +22,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Combine a simplified version of the logic in the ScanQueryMatcher and the KeyValueScanner. We can get away with this
@@ -94,7 +94,7 @@ public class FilteredKeyValueScanner implements ReseekableScanner {
                 break;
             // use a seek hint to find out where we should go
             case SEEK_NEXT_USING_HINT:
-                delegate.seek(KeyValueUtil.ensureKeyValue(filter.getNextCellHint(peeked)));
+                delegate.seek(PhoenixKeyValueUtil.maybeCopyCell(filter.getNextCellHint(peeked)));
             }
         }
     }

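Note: PhoenixKeyValueUtil.maybeCopyCell is Phoenix's shim for the removed KeyValueUtil.ensureKeyValue. A minimal sketch of adapting a Cell hint where a KeyValue-typed API is still required, assuming maybeCopyCell returns its argument when it is already a KeyValue and copies it into one otherwise:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;

    // Sketch: adapt a filter's next-cell hint for a KeyValue-typed seek;
    // on the common path the Cell already is a KeyValue and no copy is made.
    class SeekHintSketch {
        static KeyValue asKeyValue(Cell hint) {
            return PhoenixKeyValueUtil.maybeCopyCell(hint);
        }
    }
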
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java
index ebffde1..b2bfa0b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java
@@ -22,8 +22,9 @@ import static org.apache.phoenix.hbase.index.util.ImmutableBytesPtr.copyBytesIfN
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -89,8 +90,8 @@ public class GenericKeyValueBuilder extends KeyValueBuilder {
     }
 
     @Override
-    public KVComparator getKeyValueComparator() {
-        return KeyValue.COMPARATOR;
+    public CellComparator getKeyValueComparator() {
+        return CellComparatorImpl.COMPARATOR;
     }
 
     @Override

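Note: KeyValue.KVComparator and KeyValue.COMPARATOR are gone in HBase 2.0; CellComparator and CellComparatorImpl.COMPARATOR take their place and compare any Cell implementation. A minimal sketch (class name illustrative):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: sort Cells with the Cell-typed comparator.
    class ComparatorSketch {
        public static void main(String[] args) {
            List<Cell> cells = new ArrayList<>();
            cells.add(new KeyValue(Bytes.toBytes("b"), Bytes.toBytes("f"),
                Bytes.toBytes("q"), Bytes.toBytes("v")));
            cells.add(new KeyValue(Bytes.toBytes("a"), Bytes.toBytes("f"),
                Bytes.toBytes("q"), Bytes.toBytes("v")));
            Collections.sort(cells, CellComparatorImpl.COMPARATOR);
            System.out.println(Bytes.toString(CellUtil.cloneRow(cells.get(0)))); // prints "a"
        }
    }
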
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index a4a34a1..2d65747 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -128,7 +128,10 @@ public class IndexManagementUtil {
         boolean matches = false;
         outer: for (KeyValue kv : update) {
             for (ColumnReference ref : columns) {
-                if (ref.matchesFamily(kv.getFamily()) && ref.matchesQualifier(kv.getQualifier())) {
+                if (ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(),
+                    kv.getFamilyLength())
+                        && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(),
+                            kv.getQualifierLength())) {
                     matches = true;
                     // if a single column matches a single kv, we need to build a whole scanner
                     break outer;
@@ -150,7 +153,10 @@ public class IndexManagementUtil {
         boolean matches = false;
         outer: for (ColumnReference ref : columns) {
             for (KeyValue kv : update) {
-                if (ref.matchesFamily(kv.getFamily()) && ref.matchesQualifier(kv.getQualifier())) {
+                if (ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(),
+                    kv.getFamilyLength())
+                        && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(),
+                            kv.getQualifierLength())) {
                     matches = true;
                     // if a single column matches a single kv, we need to build a whole scanner
                     break outer;

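Note: the hunks above switch from the removed copy-making getFamily()/getQualifier() accessors to the array/offset/length overloads, avoiding a byte[] allocation per comparison. With plain HBase utilities the same zero-copy check looks roughly like this (sketch only):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    // Sketch: match a Cell's family and qualifier without cloning either.
    class MatchSketch {
        static boolean matches(Cell kv, byte[] family, byte[] qualifier) {
            return CellUtil.matchingFamily(kv, family)
                && CellUtil.matchingQualifier(kv, qualifier);
        }
    }
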
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java
index 9433abf..c6967cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java
@@ -22,10 +22,10 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -60,7 +60,7 @@ public abstract class KeyValueBuilder {
     @SuppressWarnings("javadoc")
     public static void deleteQuietly(Delete delete, KeyValueBuilder builder, KeyValue kv) {
         try {
-            delete.addDeleteMarker(kv);
+            delete.add(kv);
         } catch (IOException e) {
             throw new RuntimeException("KeyValue Builder " + builder + " created an invalid kv: "
                     + kv + "!");
@@ -122,7 +122,7 @@ public abstract class KeyValueBuilder {
    */
   public abstract void getValueAsPtr(Cell kv, ImmutableBytesWritable ptr);
   
-  public abstract KVComparator getKeyValueComparator();
+  public abstract CellComparator getKeyValueComparator();
   
   public abstract List<Mutation> cloneIfNecessary(List<Mutation> mutations);
 

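Note: Delete.addDeleteMarker(KeyValue) is replaced by Delete.add(Cell) in HBase 2.0; the cell's row must match the Delete's row or an IOException is thrown. A minimal sketch:

    import java.io.IOException;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Delete;

    // Sketch: attach an existing delete-marker cell to a Delete mutation.
    class DeleteMarkerSketch {
        static Delete withMarker(Delete delete, Cell marker) throws IOException {
            return delete.add(marker); // throws if the rows differ
        }
    }
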
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index b04cf0a..f2b3b98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
@@ -66,14 +66,6 @@ public class IndexedKeyValue extends KeyValue {
         return mutation;
     }
 
-    /*
-     * Returns a faked column family for an IndexedKeyValue instance
-     */
-    @Override
-    public byte [] getFamily() {
-      return WALEdit.METAFAMILY;
-    }
-    
     @Override
     public byte[] getFamilyArray() {
         return WALEdit.METAFAMILY;

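Note: WALEdit moved from org.apache.hadoop.hbase.regionserver.wal to org.apache.hadoop.hbase.wal, and the deprecated copy-making getFamily() override is dropped; the retained getFamilyArray()/getFamilyOffset()/getFamilyLength() trio describes the METAFAMILY in place. Callers that genuinely need a standalone array can clone (sketch only):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    // Sketch: obtain a copy of a Cell's family now that getFamily() is gone.
    class FamilyCopySketch {
        static byte[] familyOf(Cell cell) {
            return CellUtil.cloneFamily(cell);
        }
    }
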
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
index 682a504..d02d431 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java
@@ -25,9 +25,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
 /**
  * Codec to encode/decode {@link KeyValue}s and {@link IndexedKeyValue}s within a {@link WALEdit}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index dc26d5a..887a04c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -1136,10 +1136,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
      * since we can build the corresponding index row key.
      */
     public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ImmutableBytesWritable dataRowKeyPtr, long ts) throws IOException {
-        return buildDeleteMutation(kvBuilder, null, dataRowKeyPtr, Collections.<KeyValue>emptyList(), ts, null, null);
+        return buildDeleteMutation(kvBuilder, null, dataRowKeyPtr, Collections.<Cell>emptyList(), ts, null, null);
     }
     
-    public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ValueGetter oldState, ImmutableBytesWritable dataRowKeyPtr, Collection<KeyValue> pendingUpdates, long ts, byte[] regionStartKey, byte[] regionEndKey) throws IOException {
+    public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ValueGetter oldState, ImmutableBytesWritable dataRowKeyPtr, Collection<Cell> pendingUpdates, long ts, byte[] regionStartKey, byte[] regionEndKey) throws IOException {
         byte[] indexRowKey = this.buildRowKey(oldState, dataRowKeyPtr, regionStartKey, regionEndKey, ts);
         // Delete the entire row if any of the indexed columns changed
         DeleteType deleteType = null;
@@ -1169,7 +1169,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         // Delete columns for missing key values
         for (Cell kv : pendingUpdates) {
             if (kv.getTypeByte() != KeyValue.Type.Put.getCode()) {
-                ColumnReference ref = new ColumnReference(kv.getFamily(), kv.getQualifier());
+                ColumnReference ref =
+                        new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(),
+                                kv.getFamilyLength(), kv.getQualifierArray(),
+                                kv.getQualifierOffset(), kv.getQualifierLength());
                 if (dataTableColRefs.contains(ref)) {
                     if (delete == null) {
                         delete = new Delete(indexRowKey);                    

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index 679c5df..5b76572 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -30,8 +30,8 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -221,7 +221,7 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
                 // ordered correctly). We only need the list sorted if the expressions are going to be
                 // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop.
                 if (flattenedCells != null) {
-                    Collections.sort(flattenedCells,KeyValue.COMPARATOR);
+                    Collections.sort(flattenedCells,CellComparatorImpl.COMPARATOR);
                 }
                 PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
                 int adjust = table.getBucketNum() == null ? 1 : 2;
@@ -272,7 +272,7 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
                     transferAttributes(inc, delete);
                     mutations.add(delete);
                 }
-                delete.addDeleteMarker(cell);
+                delete.add(cell);
             }
         }
         return mutations;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 9847205..4641a8d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -87,6 +87,7 @@ import org.apache.phoenix.transaction.PhoenixTransactionContext;
 import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel;
 import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TransactionFactory;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -517,7 +518,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
         private final long currentTimestamp;
         private final RegionCoprocessorEnvironment env;
         private final Map<String, byte[]> attributes;
-        private final List<KeyValue> pendingUpdates;
+        private final List<Cell> pendingUpdates;
         private final Set<ColumnReference> indexedColumns;
         private final Map<ColumnReference, ImmutableBytesWritable> valueMap;
         
@@ -533,8 +534,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
             try {
                 CellScanner scanner = mutation.cellScanner();
                 while (scanner.advance()) {
-                    Cell cell = scanner.current();
-                    pendingUpdates.add(KeyValueUtil.ensureKeyValue(cell));
+                    pendingUpdates.add(scanner.current());
                 }
             } catch (IOException e) {
                 throw new RuntimeException(e); // Impossible
@@ -604,7 +604,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
         }
         
         @Override
-        public Collection<KeyValue> getPendingUpdate() {
+        public Collection<Cell> getPendingUpdate() {
             return pendingUpdates;
         }
 

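Note: with pendingUpdates typed as List<Cell>, the cells come straight off the mutation's CellScanner, with no ensureKeyValue copy per cell. A minimal sketch of the loop:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.client.Mutation;

    // Sketch: gather a mutation's cells directly off its CellScanner.
    class PendingUpdatesSketch {
        static List<Cell> cellsOf(Mutation mutation) throws IOException {
            List<Cell> cells = new ArrayList<>();
            CellScanner scanner = mutation.cellScanner();
            while (scanner.advance()) {
                cells.add(scanner.current());
            }
            return cells;
        }
    }
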
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
index 84d29ff..1d88c9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
@@ -24,12 +24,12 @@ import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN_FAMILY;
 import java.sql.SQLException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.aggregator.Aggregator;
 import org.apache.phoenix.expression.aggregator.Aggregators;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * 
@@ -57,7 +57,7 @@ public abstract class BaseGroupedAggregatingResultIterator implements
     }
     
     protected abstract ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException;
-    protected abstract Tuple wrapKeyValueAsResult(KeyValue keyValue) throws SQLException;
+    protected abstract Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException;
 
     @Override
     public Tuple next() throws SQLException {
@@ -80,7 +80,7 @@ public abstract class BaseGroupedAggregatingResultIterator implements
         }
         
         byte[] value = aggregators.toBytes(rowAggregators);
-        Tuple tuple = wrapKeyValueAsResult(KeyValueUtil.newKeyValue(currentKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
+        Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
         currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
         return tuple;
     }

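Note: wrapKeyValueAsResult now accepts any Cell, so subclasses can hand the aggregated cell straight to a tuple. A minimal sketch, with the PhoenixKeyValueUtil.newKeyValue signature assumed from the call above:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
    import org.apache.phoenix.schema.tuple.Tuple;
    import org.apache.phoenix.util.PhoenixKeyValueUtil;

    // Sketch: build the single aggregated cell for a group and wrap it.
    class AggResultSketch {
        static Tuple wrap(byte[] rowKey, byte[] family, byte[] qualifier,
                long ts, byte[] aggValue) {
            Cell cell = PhoenixKeyValueUtil.newKeyValue(rowKey, family,
                qualifier, ts, aggValue, 0, aggValue.length);
            return new SingleKeyValueTuple(cell);
        }
    }
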
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
index 1cf9b73..c0553fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.iterate;
 
 import java.sql.SQLException;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.aggregator.Aggregators;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
@@ -58,7 +58,7 @@ public class GroupedAggregatingResultIterator extends BaseGroupedAggregatingResu
     }
 
     @Override
-    protected Tuple wrapKeyValueAsResult(KeyValue keyValue) throws SQLException {
+    protected Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException {
         return new SingleKeyValueTuple(keyValue);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
index ae2f452..529a0c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferSortedQueue.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Queue;
+
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.iterate.OrderedResultIterator.ResultEntry;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.ResultUtil;
 
 import com.google.common.collect.MinMaxPriorityQueue;
@@ -82,7 +84,6 @@ public class MappedByteBufferSortedQueue extends MappedByteBufferQueue<ResultEnt
             return sizeof(e.sortKeys) + sizeof(toKeyValues(e));
         }
 
-        @SuppressWarnings("deprecation")
         @Override
         protected void writeToBuffer(MappedByteBuffer buffer, ResultEntry e) {
             int totalLen = 0;
@@ -140,7 +141,7 @@ public class MappedByteBufferSortedQueue extends MappedByteBufferQueue<ResultEnt
             int size = result.size();
             List<KeyValue> kvs = new ArrayList<KeyValue>(size);
             for (int i = 0; i < size; i++) {
-                kvs.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.getValue(i)));
+                kvs.add(PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i)));
             }
             return kvs;
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
index ded33cc..c78280d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java
@@ -245,11 +245,11 @@ public class NonAggregateRegionScannerFactory extends RegionScannerFactory {
     try {
       Tuple tuple = iterator.next();
       if (tuple == null && !isLastScan) {
-        List<KeyValue> kvList = new ArrayList<KeyValue>(1);
+        List<Cell> kvList = new ArrayList<Cell>(1);
         KeyValue kv = new KeyValue(QueryConstants.OFFSET_ROW_KEY_BYTES, QueryConstants.OFFSET_FAMILY,
             QueryConstants.OFFSET_COLUMN, PInteger.INSTANCE.toBytes(iterator.getRemainingOffset()));
         kvList.add(kv);
-        Result r = new Result(kvList);
+        Result r = Result.create(kvList);
         firstTuple = new ResultTuple(r);
       } else {
         firstTuple = tuple;

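Note: the Result(List<KeyValue>) constructor no longer exists in HBase 2.0; Result.create(List<Cell>) is the replacement factory. A minimal sketch:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: wrap a single marker KeyValue in a client Result.
    class ResultCreateSketch {
        public static void main(String[] args) {
            List<Cell> cells = new ArrayList<>(1);
            cells.add(new KeyValue(Bytes.toBytes("offsetRow"), Bytes.toBytes("f"),
                Bytes.toBytes("q"), Bytes.toBytes(42)));
            Result r = Result.create(cells);
            System.out.println(r.size()); // 1
        }
    }
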
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java
index 3c52e51..bb4e83b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java
@@ -29,7 +29,7 @@ import org.apache.phoenix.expression.aggregator.Aggregator;
 import org.apache.phoenix.expression.aggregator.Aggregators;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.ServerUtil;
 
 
@@ -166,7 +166,7 @@ public class RowKeyOrderedAggregateResultIterator extends LookAheadResultIterato
                 current = previous;
             } else {
                 byte[] value = aggregators.toBytes(rowAggregators);
-                current = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(previousKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
+                current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
             }
         }
         if (current == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java
index e3d0987..0bf5982 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java
@@ -24,7 +24,7 @@ import java.sql.SQLException;
 import org.apache.phoenix.expression.aggregator.Aggregators;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 
 public class UngroupedAggregatingResultIterator extends GroupedAggregatingResultIterator {
@@ -43,7 +43,7 @@ public class UngroupedAggregatingResultIterator extends GroupedAggregatingResult
             aggregators.reset(aggregators.getAggregators());
             byte[] value = aggregators.toBytes(aggregators.getAggregators());
             result = new SingleKeyValueTuple(
-                    KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, 
+                    PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, 
                             SINGLE_COLUMN_FAMILY, 
                             SINGLE_COLUMN, 
                             AGG_TIMESTAMP, 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index c34d20d..7ca178b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -29,6 +29,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
@@ -66,7 +67,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 
@@ -615,7 +616,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
                 List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
                 newCells.addAll(cells);
                 newCells.add(kv);
-                Collections.sort(newCells, KeyValue.COMPARATOR);
+                Collections.sort(newCells, CellComparatorImpl.COMPARATOR);
                 tuple = new ResultTuple(Result.create(newCells));
             }
             return tuple;
@@ -1051,7 +1052,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
                 PTableType.TABLE.getValue().getBytes(),
                 PTableType.VIEW.getValue().getBytes());
         for (byte[] tableType : tableTypes) {
-            TABLE_TYPE_TUPLES.add(new SingleKeyValueTuple(KeyValueUtil.newKeyValue(tableType, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)));
+            TABLE_TYPE_TUPLES.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(tableType, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)));
         }
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index d35cce1..6bbfd16 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -171,7 +171,7 @@ import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.CursorUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixContextExecutor;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -587,20 +587,20 @@ public class PhoenixStatement implements Statement, SQLCloseable {
             for (String planStep : planSteps) {
                 byte[] row = PVarchar.INSTANCE.toBytes(planStep);
                 List<Cell> cells = Lists.newArrayListWithCapacity(3);
-                cells.add(KeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN,
+                cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN,
                     MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY));
                 if (estimatedBytesToScan != null) {
-                    cells.add(KeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_BYTES_ESTIMATE,
+                    cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_BYTES_ESTIMATE,
                         MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                         PLong.INSTANCE.toBytes(estimatedBytesToScan)));
                 }
                 if (estimatedRowsToScan != null) {
-                    cells.add(KeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ROWS_ESTIMATE,
+                    cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ROWS_ESTIMATE,
                         MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                         PLong.INSTANCE.toBytes(estimatedRowsToScan)));
                 }
                 if (estimateInfoTimestamp != null) {
-                    cells.add(KeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ESTIMATE_INFO_TS,
+                    cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ESTIMATE_INFO_TS,
                         MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                         PLong.INSTANCE.toBytes(estimateInfoTimestamp)));
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 360859e..3925bdb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -172,31 +172,31 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
                 return;
             }
             upsertExecutor.execute(ImmutableList.<RECORD>of(record));
-            Map<Integer, List<KeyValue>> map = new HashMap<>();
-            Iterator<Pair<byte[], List<KeyValue>>> uncommittedDataIterator
+            Map<Integer, List<Cell>> map = new HashMap<>();
+            Iterator<Pair<byte[], List<Cell>>> uncommittedDataIterator
                     = PhoenixRuntime.getUncommittedDataIterator(conn, true);
             while (uncommittedDataIterator.hasNext()) {
-                Pair<byte[], List<KeyValue>> kvPair = uncommittedDataIterator.next();
-                List<KeyValue> keyValueList = kvPair.getSecond();
+                Pair<byte[], List<Cell>> kvPair = uncommittedDataIterator.next();
+                List<Cell> keyValueList = kvPair.getSecond();
                 keyValueList = preUpdateProcessor.preUpsert(kvPair.getFirst(), keyValueList);
                 byte[] first = kvPair.getFirst();
                 // Create a list of KV for each table
                 for (int i = 0; i < tableNames.size(); i++) {
                     if (Bytes.compareTo(Bytes.toBytes(tableNames.get(i)), first) == 0) {
                         if (!map.containsKey(i)) {
-                            map.put(i, new ArrayList<KeyValue>());
+                            map.put(i, new ArrayList<Cell>());
                         }
-                        List<KeyValue> list = map.get(i);
-                        for (KeyValue kv : keyValueList) {
+                        List<Cell> list = map.get(i);
+                        for (Cell kv : keyValueList) {
                             list.add(kv);
                         }
                         break;
                     }
                 }
             }
-            for (Map.Entry<Integer, List<KeyValue>> rowEntry : map.entrySet()) {
+            for (Map.Entry<Integer, List<Cell>> rowEntry : map.entrySet()) {
                 int tableIndex = rowEntry.getKey();
-                List<KeyValue> lkv = rowEntry.getValue();
+                List<Cell> lkv = rowEntry.getValue();
                // All KV values combine into a single byte array
                 writeAggregatedRow(context, tableNames.get(tableIndex), lkv);
             }
@@ -281,13 +281,13 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
      * @throws InterruptedException
      */
 
-    private void writeAggregatedRow(Context context, String tableName, List<KeyValue> lkv)
+    private void writeAggregatedRow(Context context, String tableName, List<Cell> lkv)
             throws IOException, InterruptedException {
         ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
         DataOutputStream outputStream = new DataOutputStream(bos);
         ImmutableBytesWritable outputKey =null;
         if (!lkv.isEmpty()) {
-            for (KeyValue cell : lkv) {
+            for (Cell cell : lkv) {
                 if (outputKey == null || Bytes.compareTo(outputKey.get(), outputKey.getOffset(),
                         outputKey.getLength(), cell.getRowArray(), cell.getRowOffset(), cell
                                 .getRowLength()) != 0) {
@@ -413,7 +413,7 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
             ImportPreUpsertKeyValueProcessor {
 
         @Override
-        public List<KeyValue> preUpsert(byte[] rowKey, List<KeyValue> keyValues) {
+        public List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues) {
             return keyValues;
         }
     }


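Note: after this change PhoenixRuntime.getUncommittedDataIterator yields a List<Cell> per table, so bulk-load mappers can route cells without first converting them to KeyValues. A minimal driver-side sketch; the JDBC URL is illustrative:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Iterator;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.util.Pair;
    import org.apache.phoenix.util.PhoenixRuntime;

    // Sketch: walk the uncommitted cells of a Phoenix connection, table by table.
    class UncommittedCellsSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // ... execute UPSERTs on conn without committing ...
                Iterator<Pair<byte[], List<Cell>>> it =
                        PhoenixRuntime.getUncommittedDataIterator(conn, true);
                while (it.hasNext()) {
                    Pair<byte[], List<Cell>> tableCells = it.next();
                    System.out.println(tableCells.getSecond().size());
                }
            }
        }
    }
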
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
index dff9ef2..22d40d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.mapreduce;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 
 import java.util.List;
@@ -43,6 +44,6 @@ public interface ImportPreUpsertKeyValueProcessor {
      * @param keyValues list of KeyValues that are to be written to an HFile
      * @return the list that will actually be written
      */
-    List<KeyValue> preUpsert(byte[] rowKey, List<KeyValue> keyValues);
+    List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues);
 
 }

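Note: with the interface widened to List<Cell>, an implementation no longer needs to materialize KeyValues. A minimal pass-through sketch, mirroring the default processor further up:

    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.mapreduce.ImportPreUpsertKeyValueProcessor;

    // Sketch: a no-op processor; real ones may filter or rewrite cells
    // before they are written to HFiles.
    class PassThroughProcessor implements ImportPreUpsertKeyValueProcessor {
        @Override
        public List<Cell> preUpsert(byte[] rowKey, List<Cell> keyValues) {
            return keyValues;
        }
    }
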
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index c888b7d..bb38923 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -72,6 +72,7 @@ import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
 import org.apache.phoenix.mapreduce.bulkload.TargetTableRef;
 import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -144,7 +145,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
             @Override
             public void write(TableRowkeyPair row, V cell)
                     throws IOException {
-                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+                KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
                 // null input == user explicitly wants to flush
                 if (row == null && kv == null) {
                     rollWriters();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
index 9e0d629..6f469e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportMapper.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -37,6 +38,7 @@ import org.apache.phoenix.mapreduce.PhoenixJobCounters;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -100,18 +102,18 @@ public class PhoenixIndexImportMapper extends Mapper<NullWritable, PhoenixIndexD
            indxWritable.write(this.pStatement);
            this.pStatement.execute();
             
-           final Iterator<Pair<byte[], List<KeyValue>>> uncommittedDataIterator = PhoenixRuntime.getUncommittedDataIterator(connection, true);
+           final Iterator<Pair<byte[], List<Cell>>> uncommittedDataIterator = PhoenixRuntime.getUncommittedDataIterator(connection, true);
            while (uncommittedDataIterator.hasNext()) {
-                Pair<byte[], List<KeyValue>> kvPair = uncommittedDataIterator.next();
+                Pair<byte[], List<Cell>> kvPair = uncommittedDataIterator.next();
                 if (Bytes.compareTo(Bytes.toBytes(indexTableName), kvPair.getFirst()) != 0) {
                     // skip edits for other tables
                     continue;
                 }
-                List<KeyValue> keyValueList = kvPair.getSecond();
+                List<Cell> keyValueList = kvPair.getSecond();
                 keyValueList = preUpdateProcessor.preUpsert(kvPair.getFirst(), keyValueList);
-                for (KeyValue kv : keyValueList) {
+                for (Cell kv : keyValueList) {
                     outputKey.set(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
-                    context.write(outputKey, kv);
+                    context.write(outputKey, PhoenixKeyValueUtil.maybeCopyCell(kv));
                 }
                 context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(1);
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index 5b85da5..60f07b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -125,7 +125,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWr
                         del.setAttribute(BaseScannerRegionObserver.REPLAY_WRITES, BaseScannerRegionObserver.REPLAY_ONLY_INDEX_WRITES);
                         mutations.add(del);
                     }
-                    del.addDeleteMarker(cell);
+                    del.add(cell);
                 }
             }
             // Write Mutation Batch

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index fd84c7c..0b48376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -1034,7 +1034,6 @@ public class PTableImpl implements PTable {
             }
         }
 
-        @SuppressWarnings("deprecation")
         @Override
         public void delete() {
             newMutations();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
index 9598ace..2d9f339 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
@@ -36,6 +36,7 @@ import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
@@ -53,7 +54,7 @@ import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.SequenceUtil;
 
 import com.google.common.collect.Lists;
@@ -66,14 +67,14 @@ public class Sequence {
     public enum MetaOp {CREATE_SEQUENCE, DROP_SEQUENCE, RETURN_SEQUENCE};
     
     // create empty Sequence key values used while creating a sequence row
-    private static final KeyValue CURRENT_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CURRENT_VALUE_BYTES);
-    private static final KeyValue INCREMENT_BY_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, INCREMENT_BY_BYTES);
-    private static final KeyValue CACHE_SIZE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CACHE_SIZE_BYTES);
-    private static final KeyValue MIN_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MIN_VALUE_BYTES);
-    private static final KeyValue MAX_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MAX_VALUE_BYTES);
-    private static final KeyValue CYCLE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CYCLE_FLAG_BYTES);
-    private static final KeyValue LIMIT_REACHED_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, LIMIT_REACHED_FLAG_BYTES);
-    private static final List<KeyValue> SEQUENCE_KV_COLUMNS = Arrays.<KeyValue>asList(
+    private static final Cell CURRENT_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CURRENT_VALUE_BYTES);
+    private static final Cell INCREMENT_BY_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, INCREMENT_BY_BYTES);
+    private static final Cell CACHE_SIZE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CACHE_SIZE_BYTES);
+    private static final Cell MIN_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MIN_VALUE_BYTES);
+    private static final Cell MAX_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MAX_VALUE_BYTES);
+    private static final Cell CYCLE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CYCLE_FLAG_BYTES);
+    private static final Cell LIMIT_REACHED_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, LIMIT_REACHED_FLAG_BYTES);
+    private static final List<Cell> SEQUENCE_KV_COLUMNS = Arrays.<Cell>asList(
             CURRENT_VALUE_KV,
             INCREMENT_BY_KV,
             CACHE_SIZE_KV,
@@ -84,7 +85,7 @@ public class Sequence {
             LIMIT_REACHED_KV
             );
     static {
-        Collections.sort(SEQUENCE_KV_COLUMNS, KeyValue.COMPARATOR);
+        Collections.sort(SEQUENCE_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
     }
     // Pre-compute index of sequence key values to prevent binary search
     private static final int CURRENT_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CURRENT_VALUE_KV);
@@ -301,8 +302,8 @@ public class Sequence {
         append.setAttribute(SequenceRegionObserver.CURRENT_VALUE_ATTRIB, PLong.INSTANCE.toBytes(value.nextValue));
         Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
         familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
-        		(Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PLong.INSTANCE.toBytes(value.currentValue)),
-        		(Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, PBoolean.INSTANCE.toBytes(value.limitReached))
+        		(Cell)PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PLong.INSTANCE.toBytes(value.currentValue)),
+        		(Cell)PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, PBoolean.INSTANCE.toBytes(value.limitReached))
                 ));
         return append;
     }
@@ -363,7 +364,7 @@ public class Sequence {
         } catch (IOException e) {
             throw new RuntimeException(e); // Impossible
         }
-        for (KeyValue kv : SEQUENCE_KV_COLUMNS) {
+        for (Cell kv : SEQUENCE_KV_COLUMNS) {
             try {
                 // Store the timestamp on the cell as well as HBase 1.2 seems to not
                 // be serializing over the time range (see HBASE-15698).
@@ -388,64 +389,63 @@ public class Sequence {
      * @param cellIndex index of the KeyValue to be returned (if the sequence row is from a previous version
      * @return KeyValue
      */
-    private static KeyValue getKeyValue(Result r, KeyValue kv, int cellIndex) {
+    private static Cell getKeyValue(Result r, Cell kv, int cellIndex) {
         Cell[] cells = r.rawCells();
         // if the sequence row is from a previous version then MIN_VALUE, MAX_VALUE, CYCLE and LIMIT_REACHED key values are not present,
         // the sequence row has only three columns (INCREMENT_BY, CACHE_SIZE and CURRENT_VALUE) and the order of the cells 
         // in the array returned by rawCells() is not what we expect, so use getColumnLatestCell() to get the cell we want
-        Cell cell = cells.length == NUM_SEQUENCE_KEY_VALUES ? cells[cellIndex] :
+        return cells.length == NUM_SEQUENCE_KEY_VALUES ? cells[cellIndex] :
         	r.getColumnLatestCell(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength());
-        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
     }
     
-    private static KeyValue getKeyValue(Result r, KeyValue kv) {
+    private static Cell getKeyValue(Result r, Cell kv) {
         return getKeyValue(r, kv, SEQUENCE_KV_COLUMNS.indexOf(kv));
     }
     
-    public static KeyValue getCurrentValueKV(Result r) {
+    public static Cell getCurrentValueKV(Result r) {
         return getKeyValue(r, CURRENT_VALUE_KV, CURRENT_VALUE_INDEX);
     }
     
-    public static KeyValue getIncrementByKV(Result r) {
+    public static Cell getIncrementByKV(Result r) {
         return getKeyValue(r, INCREMENT_BY_KV, INCREMENT_BY_INDEX);
     }
     
-    public static KeyValue getCacheSizeKV(Result r) {
+    public static Cell getCacheSizeKV(Result r) {
         return getKeyValue(r, CACHE_SIZE_KV, CACHE_SIZE_INDEX);
     }
     
-    public static KeyValue getMinValueKV(Result r) {
+    public static Cell getMinValueKV(Result r) {
         return getKeyValue(r, MIN_VALUE_KV, MIN_VALUE_INDEX);
     }
     
-    public static KeyValue getMaxValueKV(Result r) {
+    public static Cell getMaxValueKV(Result r) {
         return getKeyValue(r, MAX_VALUE_KV, MAX_VALUE_INDEX);
     }
     
-    public static KeyValue getCycleKV(Result r) {
+    public static Cell getCycleKV(Result r) {
         return getKeyValue(r, CYCLE_KV, CYCLE_INDEX);
     }
 
-    public static KeyValue getLimitReachedKV(Result r) {
+    public static Cell getLimitReachedKV(Result r) {
         return getKeyValue(r, LIMIT_REACHED_KV, LIMIT_REACHED_INDEX);
     }
     
-    public static void replaceCurrentValueKV(List<Cell> kvs, KeyValue currentValueKV) {
+    public static void replaceCurrentValueKV(List<Cell> kvs, Cell currentValueKV) {
         kvs.set(CURRENT_VALUE_INDEX, currentValueKV);
     }
     
-    public static void replaceMinValueKV(List<Cell> kvs, KeyValue minValueKV) {
+    public static void replaceMinValueKV(List<Cell> kvs, Cell minValueKV) {
         kvs.set(MIN_VALUE_INDEX, minValueKV);
     }
 
-    public static void replaceMaxValueKV(List<Cell> kvs, KeyValue maxValueKV) {
+    public static void replaceMaxValueKV(List<Cell> kvs, Cell maxValueKV) {
         kvs.set(MAX_VALUE_INDEX, maxValueKV);
     }
 
-    public static void replaceCycleValueKV(List<Cell> kvs, KeyValue cycleValueKV) {
+    public static void replaceCycleValueKV(List<Cell> kvs, Cell cycleValueKV) {
         kvs.set(CYCLE_INDEX, cycleValueKV);
     }
-    public static void replaceLimitReachedKV(List<Cell> kvs, KeyValue limitReachedKV) {
+    public static void replaceLimitReachedKV(List<Cell> kvs, Cell limitReachedKV) {
         kvs.set(LIMIT_REACHED_INDEX, limitReachedKV);
     }
     
@@ -459,7 +459,7 @@ public class Sequence {
             return Lists.newArrayList(r.rawCells());
         // else we need to handle missing MIN_VALUE, MAX_VALUE, CYCLE and LIMIT_REACHED KeyValues
         List<Cell> cellList = Lists.newArrayListWithCapacity(NUM_SEQUENCE_KEY_VALUES);
-        for (KeyValue kv : SEQUENCE_KV_COLUMNS) {
+        for (Cell kv : SEQUENCE_KV_COLUMNS) {
             cellList.add(getKeyValue(r,kv));
         }
         return cellList;
@@ -502,12 +502,12 @@ public class Sequence {
         }
         
         public SequenceValue(Result r, ValueOp op, long numToAllocate) {
-            KeyValue currentValueKV = getCurrentValueKV(r);
-            KeyValue incrementByKV = getIncrementByKV(r);
-            KeyValue cacheSizeKV = getCacheSizeKV(r);
-            KeyValue minValueKV = getMinValueKV(r);
-            KeyValue maxValueKV = getMaxValueKV(r);
-            KeyValue cycleKV = getCycleKV(r);
+            Cell currentValueKV = getCurrentValueKV(r);
+            Cell incrementByKV = getIncrementByKV(r);
+            Cell cacheSizeKV = getCacheSizeKV(r);
+            Cell minValueKV = getMinValueKV(r);
+            Cell maxValueKV = getMaxValueKV(r);
+            Cell cycleKV = getCycleKV(r);
             this.timestamp = currentValueKV.getTimestamp();
             this.nextValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
             this.incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault());
@@ -563,15 +563,15 @@ public class Sequence {
         Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
         byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith);
         familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)
                 ));
         return append;
     }
@@ -601,7 +601,7 @@ public class Sequence {
         }
         Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
         familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
-                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
+                PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
         return append;
     }
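
A minimal sketch, not part of the patch, of the Cell-based decode pattern the
SequenceValue constructor above now relies on: the long is read straight out of
the cell's backing array, with no intermediate KeyValue copy. The class name is
hypothetical.

import org.apache.hadoop.hbase.Cell;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PLong;

class SequenceCellDecode {
    static long decodeLong(Cell cell) {
        // Decode in place from the cell's value array; nothing is cloned.
        return PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(),
                cell.getValueOffset(), SortOrder.getDefault());
    }
}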
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 4e4978c..c14759f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Get;
@@ -311,12 +312,11 @@ class DefaultStatisticsCollector implements StatisticsCollector {
             incrementRow = true;
         }
         for (Cell cell : results) {
-            KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-            maxTimeStamp = Math.max(maxTimeStamp, kv.getTimestamp());
+            maxTimeStamp = Math.max(maxTimeStamp, cell.getTimestamp());
             Pair<Long, GuidePostsInfoBuilder> gps;
             if (cachedGuidePosts == null) {
-                ImmutableBytesPtr cfKey = new ImmutableBytesPtr(kv.getFamilyArray(), kv.getFamilyOffset(),
-                        kv.getFamilyLength());
+                ImmutableBytesPtr cfKey = new ImmutableBytesPtr(cell.getFamilyArray(), cell.getFamilyOffset(),
+                        cell.getFamilyLength());
                 gps = guidePostsInfoWriterMap.get(cfKey);
                 if (gps == null) {
                     gps = new Pair<Long, GuidePostsInfoBuilder>(0l,
@@ -334,7 +334,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                     incrementRow = false;
                 }
             }
-            int kvLength = kv.getLength();
+            int kvLength = CellUtil.estimatedSerializedSizeOf(cell);
             long byteCount = gps.getFirst() + kvLength;
             gps.setFirst(byteCount);
             if (byteCount >= guidePostDepth) {
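
A note on the size accounting above: CellUtil.estimatedSerializedSizeOf appears
to include the serialized int length prefix, so it can run a few bytes larger
per cell than the old KeyValue.getLength(), nudging byteCount toward
guidePostDepth slightly faster. A throwaway comparison sketch (hypothetical
class name, assuming this reading of CellUtil is right):

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

class CellSizeCheck {
    public static void main(String[] args) {
        KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
                Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
        // The two counts should differ only by the serialization overhead.
        System.out.println(kv.getLength());
        System.out.println(CellUtil.estimatedSerializedSizeOf(kv));
    }
}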

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
index bde049b..bfa63ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * Tuple that can be used to represent a list of cells. It is imperative that the list of cells
@@ -59,7 +59,7 @@ public class MultiKeyValueTuple extends BaseTuple {
 
     @Override
     public Cell getValue(byte[] family, byte[] qualifier) {
-        return KeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, values, family, qualifier);
+        return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, values, family, qualifier);
     }
 
     @Override
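
A usage sketch of the renamed lookup (hypothetical wrapper class; the cells
must all share one row key and be sorted by family then qualifier, since
getColumnLatest is a binary search with a family/qualifier-only comparator):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
import org.apache.phoenix.util.PhoenixKeyValueUtil;

class LatestColumnLookup {
    static Cell latest(List<Cell> cells, byte[] family, byte[] qualifier) {
        // Returns null when the (family, qualifier) pair is absent.
        return PhoenixKeyValueUtil.getColumnLatest(
                GenericKeyValueBuilder.INSTANCE, cells, family, qualifier);
    }
}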

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java
index 63ba101..276c72d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java
@@ -48,8 +48,8 @@ public class PositionBasedResultTuple extends BaseTuple {
     }
 
     @Override
-    public KeyValue getValue(byte[] family, byte[] qualifier) {
-        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cells.getCellForColumnQualifier(qualifier));
+    public Cell getValue(byte[] family, byte[] qualifier) {
+        return cells.getCellForColumnQualifier(qualifier);
     }
 
     @Override
@@ -81,14 +81,14 @@ public class PositionBasedResultTuple extends BaseTuple {
     }
 
     @Override
-    public KeyValue getValue(int index) {
-        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(index == 0 ? cells.getFirstCell() : cells.get(index));
+    public Cell getValue(int index) {
+        return index == 0 ? cells.getFirstCell() : cells.get(index);
     }
 
     @Override
     public boolean getValue(byte[] family, byte[] qualifier,
             ImmutableBytesWritable ptr) {
-        KeyValue kv = getValue(family, qualifier);
+        Cell kv = getValue(family, qualifier);
         if (kv == null)
             return false;
         ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
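
The ptr-based overload stays zero-copy under the Cell interface; a minimal
sketch of the same pattern (hypothetical class name):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

class ZeroCopyValue {
    static boolean readValue(Cell kv, ImmutableBytesWritable ptr) {
        if (kv == null) return false;
        // Point ptr at the value bytes in place; no byte[] is allocated.
        ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
        return true;
    }
}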

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
index 3774837..3419e3c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 
 /**
  * 
@@ -55,10 +55,9 @@ public class ResultTuple extends BaseTuple {
     }
 
     @Override
-    public KeyValue getValue(byte[] family, byte[] qualifier) {
-        Cell cell = KeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, 
+    public Cell getValue(byte[] family, byte[] qualifier) {
+        return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, 
           result.rawCells(), family, qualifier);
-        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
     }
 
     @Override
@@ -91,14 +90,13 @@ public class ResultTuple extends BaseTuple {
 
     @Override
     public KeyValue getValue(int index) {
-        return  org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(
-          result.rawCells()[index]);
+        return  PhoenixKeyValueUtil.maybeCopyCell(result.rawCells()[index]);
     }
 
     @Override
     public boolean getValue(byte[] family, byte[] qualifier,
             ImmutableBytesWritable ptr) {
-        KeyValue kv = getValue(family, qualifier);
+        Cell kv = getValue(family, qualifier);
         if (kv == null)
             return false;
         ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index cacf4c4..c26d2cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -285,7 +285,7 @@ public class IndexUtil {
                     regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                     regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                 }
-                Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr, Collections.<KeyValue>emptyList(), ts, regionStartKey, regionEndkey);
+                Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr, Collections.<Cell>emptyList(), ts, regionStartKey, regionEndkey);
                 delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, dataMutation.getAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY));
                 indexMutations.add(delete);
             }
@@ -558,8 +558,8 @@ public class IndexUtil {
             byte[] value =
                     tupleProjector.getSchema().toBytes(joinTuple, tupleProjector.getExpressions(),
                         tupleProjector.getValueBitSet(), ptr);
-            KeyValue keyValue =
-                    KeyValueUtil.newKeyValue(firstCell.getRowArray(),firstCell.getRowOffset(),firstCell.getRowLength(), VALUE_COLUMN_FAMILY,
+            Cell keyValue =
+                    PhoenixKeyValueUtil.newKeyValue(firstCell.getRowArray(),firstCell.getRowOffset(),firstCell.getRowLength(), VALUE_COLUMN_FAMILY,
                         VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length);
             result.add(keyValue);
         }
@@ -658,31 +658,6 @@ public class IndexUtil {
                 public int getTagsLength() {
                     return cell.getTagsLength();
                 }
-
-                @Override
-                public long getMvccVersion() {
-                    return cell.getMvccVersion();
-                }
-
-                @Override
-                public byte[] getValue() {
-                    return cell.getValue();
-                }
-
-                @Override
-                public byte[] getFamily() {
-                    return cell.getFamily();
-                }
-
-                @Override
-                public byte[] getQualifier() {
-                    return cell.getQualifier();
-                }
-
-                @Override
-                public byte[] getRow() {
-                    return cell.getRow();
-                }
             };
             itr.set(newCell);
         }
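
The overrides deleted above (getMvccVersion, getValue, getFamily, getQualifier,
getRow) are gone from the Cell interface in HBase 2.0, so the anonymous wrapper
no longer needs them. Callers that still want byte[] copies can go through
CellUtil instead; a sketch (hypothetical class name):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

class CellGetterMigration {
    // Each clone* call copies the requested component into a fresh byte[].
    static byte[] value(Cell c)     { return CellUtil.cloneValue(c); }
    static byte[] family(Cell c)    { return CellUtil.cloneFamily(c); }
    static byte[] qualifier(Cell c) { return CellUtil.cloneQualifier(c); }
    static byte[] row(Cell c)       { return CellUtil.cloneRow(c); }
}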

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
deleted file mode 100644
index 4234df5..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.util;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.TableRef;
-import org.apache.phoenix.schema.types.PArrayDataTypeEncoder;
-
-/**
- * 
- * Utilities for KeyValue. Where there's duplication with KeyValue methods,
- * these avoid creating new objects when not necessary (primary preventing
- * byte array copying).
- *
- * 
- * @since 0.1
- */
-public class KeyValueUtil {
-    private KeyValueUtil() {
-    }
-
-    public static KeyValue newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
-        return new KeyValue(key, 0, key.length,
-                cf, 0, cf.length,
-                cq, 0, cq.length,
-                ts, Type.Put,
-                value, valueOffset, valueLength);
-    }
-
-    public static KeyValue newKeyValue(ImmutableBytesWritable key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
-        return new KeyValue(key.get(), key.getOffset(), key.getLength(),
-                cf, 0, cf.length,
-                cq, 0, cq.length,
-                ts, Type.Put,
-                value, valueOffset, valueLength);
-    }
-
-    public static KeyValue newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
-        return new KeyValue(key, keyOffset, keyLength,
-                cf, 0, cf.length,
-                cq, 0, cq.length,
-                ts, Type.Put,
-                value, valueOffset, valueLength);
-    }
-    
-    public static KeyValue newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, 
-        int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, 
-        int valueOffset, int valueLength) {
-        return new KeyValue(key, keyOffset, keyLength,
-                cf, cfOffset, cfLength,
-                cq, cqOffset, cqLength,
-                ts, Type.Put,
-                value, valueOffset, valueLength);
-    }
-    
-	public static KeyValue newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) {
-		return newKeyValue(key, cf, cq, ts, value, 0, value.length);
-	}
-
-    /**
-     * Binary search for latest column value without allocating memory in the process
-     * @param kvBuilder TODO
-     * @param kvs
-     * @param family
-     * @param qualifier
-     */
-    public static Cell getColumnLatest(KeyValueBuilder kvBuilder, List<Cell>kvs, byte[] family, byte[] qualifier) {
-        if (kvs.size() == 0) {
-        	return null;
-        }
-        assert CellUtil.matchingRow(kvs.get(0), kvs.get(kvs.size()-1));
-
-        Comparator<Cell> comp = new SearchComparator(kvBuilder, family, qualifier);
-        int pos = Collections.binarySearch(kvs, null, comp);
-        if (pos < 0 || pos == kvs.size()) {
-          return null; // doesn't exist
-        }
-    
-        return kvs.get(pos);
-    }
-
-
-    /**
-     * Binary search for latest column value without allocating memory in the process
-     * @param kvBuilder TODO
-     * @param kvs
-     * @param family
-     * @param qualifier
-     */
-    public static Cell getColumnLatest(KeyValueBuilder kvBuilder, Cell[] kvs, byte[] family, byte[] qualifier) {
-        if (kvs.length == 0) {
-            return null;
-        }
-        assert CellUtil.matchingRow(kvs[0], kvs[kvs.length-1]);
-
-        Comparator<Cell> comp = new SearchComparator(kvBuilder, family, qualifier);
-        int pos = Arrays.binarySearch(kvs, null, comp);
-        if (pos < 0 || pos == kvs.length) {
-          return null; // doesn't exist
-        }
-    
-        return kvs[pos];
-    }
-
-    /*
-     * Special comparator, *only* works for binary search.
-     *
-     * We make the following assumption:
-     * 1. All KVs compared have the same row key
-     * 2. For each (rowkey, family, qualifier) there is at most one version
-     * 3. Current JDKs only uses the search term on the right side
-     *
-     * #1 allows us to avoid row key comparisons altogether.
-     * #2 allows for exact matches
-     * #3 lets us save instanceof checks, and allows to inline the search term in the comparator
-     */
-	private static class SearchComparator implements Comparator<Cell> {
-	  private final KeyValueBuilder kvBuilder;
-    private final byte[] family;
-    private final byte[] qualifier;
-
-    public SearchComparator(KeyValueBuilder kvBuilder, byte[] f, byte[] q) {
-      this.kvBuilder = kvBuilder;
-      family = f;
-      qualifier = q;
-    }
-
-    @Override
-    public int compare(final Cell l, final Cell ignored) {
-			assert ignored == null;
-			// family
-			int val = kvBuilder.compareFamily(l, family, 0, family.length);
-			if (val != 0) {
-				return val;
-			}
-			// qualifier
-			return kvBuilder.compareQualifier(l, qualifier, 0, qualifier.length);
-		}
-	}
-
-	/**
-	 * Calculate the size a mutation will likely take when stored in HBase
-	 * @param m The Mutation
-	 * @return the disk size of the passed mutation
-	 */
-    public static long calculateMutationDiskSize(Mutation m) {
-        long size = 0;
-        for (Entry<byte [], List<Cell>> entry : m.getFamilyCellMap().entrySet()) {
-            for (Cell c : entry.getValue()) {
-                size += org.apache.hadoop.hbase.KeyValueUtil.length(c);
-            }
-        }
-        return size;
-    }
-
-    /**
-     * Estimates the storage size of a row
-     * @param mutations map from table to row to RowMutationState
-     * @return estimated row size
-     */
-    public static long
-            getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
-        long size = 0;
-        // iterate over tables
-        for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations
-                .entrySet()) {
-            PTable table = tableEntry.getKey().getTable();
-            // iterate over rows
-            for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue()
-                    .entrySet()) {
-                int rowLength = rowEntry.getKey().getLength();
-                Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
-                switch (table.getImmutableStorageScheme()) {
-                case ONE_CELL_PER_COLUMN:
-                    // iterate over columns
-                    for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
-                        PColumn pColumn = colValueEntry.getKey();
-                        size +=
-                                KeyValue.getKeyValueDataStructureSize(rowLength,
-                                    pColumn.getFamilyName().getBytes().length,
-                                    pColumn.getColumnQualifierBytes().length,
-                                    colValueEntry.getValue().length);
-                    }
-                    break;
-                case SINGLE_CELL_ARRAY_WITH_OFFSETS:
-                    // we store all the column values in a single key value that contains all the
-                    // column values followed by an offset array
-                    size +=
-                            PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength,
-                                colValueMap);
-                    break;
-                }
-                // count the empty key value
-                Pair<byte[], byte[]> emptyKeyValueInfo =
-                        EncodedColumnsUtil.getEmptyKeyValueInfo(table);
-                size +=
-                        KeyValue.getKeyValueDataStructureSize(rowLength,
-                            SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(),
-                            emptyKeyValueInfo.getFirst().length,
-                            emptyKeyValueInfo.getSecond().length);
-            }
-        }
-        return size;
-    }
-}
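
With the Phoenix class renamed, HBase's org.apache.hadoop.hbase.KeyValueUtil
and the Phoenix utility can be imported side by side without a simple-name
clash. A sketch (hypothetical class name):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;         // HBase's utility
import org.apache.phoenix.util.PhoenixKeyValueUtil;  // Phoenix's renamed utility

class NoNameClash {
    static KeyValue firstOnRow(byte[] row) {
        return KeyValueUtil.createFirstOnRow(row);    // HBase helper
    }
    static KeyValue copied(Cell c) {
        return PhoenixKeyValueUtil.maybeCopyCell(c);  // Phoenix helper
    }
}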

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 69eb5bc..7914e3e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -254,7 +254,7 @@ public class MetaDataUtil {
         List<Cell> kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
         if (kvs != null) {
             for (Cell cell : kvs) {
-                KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
+                KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
                 if (builder.compareQualifier(kv, key, 0, key.length) ==0) {
                     builder.getValueAsPtr(kv, ptr);
                     return true;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
new file mode 100644
index 0000000..d532130
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder.DataType;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PArrayDataTypeEncoder;
+
+/**
+ * 
+ * Utilities for Cells and KeyValues. Where there's duplication with KeyValue
+ * methods, these avoid creating new objects when not necessary (primarily
+ * preventing byte array copying).
+ *
+ * 
+ * @since 0.1
+ */
+public class PhoenixKeyValueUtil {
+    private PhoenixKeyValueUtil() {
+    }
+
+    public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
+        return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(key).setFamily(cf)
+                .setQualifier(cq).setTimestamp(ts).setType(DataType.Put)
+                .setValue(value, valueOffset, valueLength).build();
+    }
+
+    public static Cell newKeyValue(ImmutableBytesWritable key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
+        return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+                .setRow(key.get(), key.getOffset(), key.getLength()).setFamily(cf).setQualifier(cq)
+                .setTimestamp(ts).setType(DataType.Put).setValue(value, valueOffset, valueLength)
+                .build();
+    }
+
+    public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) {
+        return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+                .setRow(key, keyOffset, keyLength).setFamily(cf).setQualifier(cq).setTimestamp(ts)
+                .setType(DataType.Put).setValue(value, valueOffset, valueLength).build();
+    }
+    
+    public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, 
+        int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, 
+        int valueOffset, int valueLength) {
+        return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
+                .setRow(key, keyOffset, keyLength).setFamily(cf, cfOffset, cfLength)
+                .setQualifier(cq, cqOffset, cqLength).setTimestamp(ts)
+                .setType(DataType.Put).setValue(value, valueOffset, valueLength).build();
+    }
+    
+    public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) {
+        return newKeyValue(key, cf, cq, ts, value, 0, value.length);
+    }
+
+    /**
+     * Binary search for latest column value without allocating memory in the process
+     * @param kvBuilder builder used for family/qualifier comparisons
+     * @param kvs cells of a single row, sorted by family and qualifier
+     * @param family column family to look up
+     * @param qualifier column qualifier to look up
+     */
+    public static Cell getColumnLatest(KeyValueBuilder kvBuilder, List<Cell>kvs, byte[] family, byte[] qualifier) {
+        if (kvs.size() == 0) {
+        	return null;
+        }
+        assert CellUtil.matchingRows(kvs.get(0), kvs.get(kvs.size()-1));
+
+        Comparator<Cell> comp = new SearchComparator(kvBuilder, family, qualifier);
+        int pos = Collections.binarySearch(kvs, null, comp);
+        if (pos < 0 || pos == kvs.size()) {
+          return null; // doesn't exist
+        }
+    
+        return kvs.get(pos);
+    }
+
+
+    /**
+     * Binary search for latest column value without allocating memory in the process
+     * @param kvBuilder builder used for family/qualifier comparisons
+     * @param kvs cells of a single row, sorted by family and qualifier
+     * @param family column family to look up
+     * @param qualifier column qualifier to look up
+     */
+    public static Cell getColumnLatest(KeyValueBuilder kvBuilder, Cell[] kvs, byte[] family, byte[] qualifier) {
+        if (kvs.length == 0) {
+            return null;
+        }
+        assert CellUtil.matchingRows(kvs[0], kvs[kvs.length-1]);
+
+        Comparator<Cell> comp = new SearchComparator(kvBuilder, family, qualifier);
+        int pos = Arrays.binarySearch(kvs, null, comp);
+        if (pos < 0 || pos == kvs.length) {
+          return null; // doesn't exist
+        }
+    
+        return kvs[pos];
+    }
+
+    /*
+     * Special comparator, *only* works for binary search.
+     *
+     * We make the following assumption:
+     * 1. All KVs compared have the same row key
+     * 2. For each (rowkey, family, qualifier) there is at most one version
+     * 3. Current JDKs only use the search term on the right side
+     *
+     * #1 allows us to avoid row key comparisons altogether.
+     * #2 allows for exact matches
+     * #3 lets us save instanceof checks, and allows us to inline the search term in the comparator
+     */
+    private static class SearchComparator implements Comparator<Cell> {
+        private final KeyValueBuilder kvBuilder;
+        private final byte[] family;
+        private final byte[] qualifier;
+
+        public SearchComparator(KeyValueBuilder kvBuilder, byte[] f, byte[] q) {
+            this.kvBuilder = kvBuilder;
+            family = f;
+            qualifier = q;
+        }
+
+        @Override
+        public int compare(final Cell l, final Cell ignored) {
+            assert ignored == null;
+            // family
+            int val = kvBuilder.compareFamily(l, family, 0, family.length);
+            if (val != 0) {
+                return val;
+            }
+            // qualifier
+            return kvBuilder.compareQualifier(l, qualifier, 0, qualifier.length);
+        }
+    }
+
+	/**
+	 * Calculate the size a mutation will likely take when stored in HBase
+	 * @param m The Mutation
+	 * @return the disk size of the passed mutation
+	 */
+    public static long calculateMutationDiskSize(Mutation m) {
+        long size = 0;
+        for (Entry<byte [], List<Cell>> entry : m.getFamilyCellMap().entrySet()) {
+            for (Cell c : entry.getValue()) {
+                size += org.apache.hadoop.hbase.KeyValueUtil.length(c);
+            }
+        }
+        return size;
+    }
+
+    /**
+     * Estimates the storage size of a row
+     * @param mutations map from table to row to RowMutationState
+     * @return estimated row size
+     */
+    public static long
+            getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
+        long size = 0;
+        // iterate over tables
+        for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations
+                .entrySet()) {
+            PTable table = tableEntry.getKey().getTable();
+            // iterate over rows
+            for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue()
+                    .entrySet()) {
+                int rowLength = rowEntry.getKey().getLength();
+                Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
+                switch (table.getImmutableStorageScheme()) {
+                case ONE_CELL_PER_COLUMN:
+                    // iterate over columns
+                    for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
+                        PColumn pColumn = colValueEntry.getKey();
+                        size +=
+                                KeyValue.getKeyValueDataStructureSize(rowLength,
+                                    pColumn.getFamilyName().getBytes().length,
+                                    pColumn.getColumnQualifierBytes().length,
+                                    colValueEntry.getValue().length);
+                    }
+                    break;
+                case SINGLE_CELL_ARRAY_WITH_OFFSETS:
+                    // we store all the column values in a single key value that contains all the
+                    // column values followed by an offset array
+                    size +=
+                            PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength,
+                                colValueMap);
+                    break;
+                }
+                // count the empty key value
+                Pair<byte[], byte[]> emptyKeyValueInfo =
+                        EncodedColumnsUtil.getEmptyKeyValueInfo(table);
+                size +=
+                        KeyValue.getKeyValueDataStructureSize(rowLength,
+                            SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(),
+                            emptyKeyValueInfo.getFirst().length,
+                            emptyKeyValueInfo.getSecond().length);
+            }
+        }
+        return size;
+    }
+
+    public static KeyValue maybeCopyCell(Cell c) {
+        // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something
+        // that will likely be removed at some point in time.
+        if (c == null) return null;
+        if (c instanceof KeyValue) {
+            return (KeyValue) c;
+        }
+        return KeyValueUtil.copyToNewKeyValue(c);
+    }
+}
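
A usage sketch of the builder-backed factory (hypothetical class name;
DEEP_COPY means the returned Cell owns copies of the input arrays, so the
caller may reuse its buffers):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.PhoenixKeyValueUtil;

class BuildPutCell {
    public static void main(String[] args) {
        Cell c = PhoenixKeyValueUtil.newKeyValue(Bytes.toBytes("row"),
                Bytes.toBytes("f"), Bytes.toBytes("q"), 1L, Bytes.toBytes("v"));
        System.out.println(c.getTimestamp()); // prints 1
    }
}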

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 16ef206..6b5a73a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -53,7 +53,6 @@ import org.apache.commons.cli.PosixParser;
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
@@ -358,8 +357,8 @@ public class PhoenixRuntime {
      * @throws SQLException
      */
     @Deprecated
-    public static List<KeyValue> getUncommittedData(Connection conn) throws SQLException {
-        Iterator<Pair<byte[],List<KeyValue>>> iterator = getUncommittedDataIterator(conn);
+    public static List<Cell> getUncommittedData(Connection conn) throws SQLException {
+        Iterator<Pair<byte[],List<Cell>>> iterator = getUncommittedDataIterator(conn);
         if (iterator.hasNext()) {
             return iterator.next().getSecond();
         }
@@ -373,7 +372,7 @@ public class PhoenixRuntime {
      * @return the list of HBase mutations for uncommitted data
      * @throws SQLException
      */
-    public static Iterator<Pair<byte[],List<KeyValue>>> getUncommittedDataIterator(Connection conn) throws SQLException {
+    public static Iterator<Pair<byte[],List<Cell>>> getUncommittedDataIterator(Connection conn) throws SQLException {
         return getUncommittedDataIterator(conn, false);
     }
 
@@ -384,10 +383,10 @@ public class PhoenixRuntime {
      * @return the list of HBase mutations for uncommitted data
      * @throws SQLException
      */
-    public static Iterator<Pair<byte[],List<KeyValue>>> getUncommittedDataIterator(Connection conn, boolean includeMutableIndexes) throws SQLException {
+    public static Iterator<Pair<byte[],List<Cell>>> getUncommittedDataIterator(Connection conn, boolean includeMutableIndexes) throws SQLException {
         final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         final Iterator<Pair<byte[],List<Mutation>>> iterator = pconn.getMutationState().toMutations(includeMutableIndexes);
-        return new Iterator<Pair<byte[],List<KeyValue>>>() {
+        return new Iterator<Pair<byte[],List<Cell>>>() {
 
             @Override
             public boolean hasNext() {
@@ -395,18 +394,18 @@ public class PhoenixRuntime {
             }
 
             @Override
-            public Pair<byte[], List<KeyValue>> next() {
+            public Pair<byte[], List<Cell>> next() {
                 Pair<byte[],List<Mutation>> pair = iterator.next();
-                List<KeyValue> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row
+                List<Cell> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row
                 for (Mutation mutation : pair.getSecond()) {
                     for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
                         for (Cell keyValue : keyValueList) {
-                            keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
+                            keyValues.add(PhoenixKeyValueUtil.maybeCopyCell(keyValue));
                         }
                     }
                 }
                 Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator());
-                return new Pair<byte[], List<KeyValue>>(pair.getFirst(),keyValues);
+                return new Pair<byte[], List<Cell>>(pair.getFirst(),keyValues);
             }
 
             @Override
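
A consumer-side sketch of the retyped iterator (hypothetical class name,
assuming an open Phoenix connection with pending upserts):

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

class DumpUncommitted {
    static void dump(Connection conn) throws SQLException {
        Iterator<Pair<byte[], List<Cell>>> it =
                PhoenixRuntime.getUncommittedDataIterator(conn);
        while (it.hasNext()) {
            Pair<byte[], List<Cell>> tableAndCells = it.next();
            for (Cell cell : tableAndCells.getSecond()) {
                // Cells come back sorted per table; clone the row only for display.
                System.out.println(Bytes.toString(tableAndCells.getFirst()) + " "
                        + Bytes.toStringBinary(CellUtil.cloneRow(cell)));
            }
        }
    }
}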

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
index f97230b..967f38d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
@@ -46,7 +46,7 @@ public class ResultUtil {
     public static Result toResult(ImmutableBytesWritable bytes) {
         byte [] buf = bytes.get();
         int offset = bytes.getOffset();
-        int finalOffset = bytes.getSize() + offset;
+        int finalOffset = bytes.getLength() + offset;
         List<Cell> kvs = new ArrayList<Cell>();
         while(offset < finalOffset) {
           int keyLength = Bytes.toInt(buf, offset);
@@ -70,9 +70,8 @@ public class ResultUtil {
         //key.set(getRawBytes(r), getKeyOffset(r), getKeyLength(r));
     }
     
-    @SuppressWarnings("deprecation")
-    public static void getKey(KeyValue value, ImmutableBytesWritable key) {
-        key.set(value.getBuffer(), value.getRowOffset(), value.getRowLength());
+    public static void getKey(Cell value, ImmutableBytesWritable key) {
+        key.set(value.getRowArray(), value.getRowOffset(), value.getRowLength());
     }
     
     /**
@@ -109,7 +108,7 @@ public class ResultUtil {
      * @param r
      */
     static int getKeyOffset(Result r) {
-        KeyValue firstKV = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(r.rawCells()[0]);
+        KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]);
         return firstKV.getOffset();
     }
     
@@ -118,9 +117,8 @@ public class ResultUtil {
         return Bytes.toShort(getRawBytes(r), getKeyOffset(r) - Bytes.SIZEOF_SHORT);
     }
     
-    @SuppressWarnings("deprecation")
     static byte[] getRawBytes(Result r) {
-        KeyValue firstKV = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(r.rawCells()[0]);
+        KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]);
         return firstKV.getBuffer();
     }
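
getBuffer() and getOffset() remain KeyValue-only, which is why maybeCopyCell is
applied before touching the raw backing bytes: a KeyValue passes through
untouched, while any other Cell implementation is copied first. A sketch
(hypothetical class name):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.phoenix.util.PhoenixKeyValueUtil;

class RawKeyValueAccess {
    static byte[] rawBytes(Cell first) {
        // Copies only when the Cell is not already a KeyValue.
        KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(first);
        return kv.getBuffer();
    }
}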
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
index f495a7e..ec9bf49 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
@@ -137,18 +137,17 @@ public class TupleUtil {
         }
     }
     
-    @SuppressWarnings("deprecation")
     public static int write(Tuple result, DataOutput out) throws IOException {
         int size = 0;
         for(int i = 0; i < result.size(); i++) {
-            KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.getValue(i));
+            KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i));
             size += kv.getLength();
             size += Bytes.SIZEOF_INT; // kv.getLength
           }
 
         WritableUtils.writeVInt(out, size);
         for(int i = 0; i < result.size(); i++) {
-            KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.getValue(i));
+            KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i));
             out.writeInt(kv.getLength());
             out.write(kv.getBuffer(), kv.getOffset(), kv.getLength());
           }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 33ad7e5..43f11b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -648,7 +648,7 @@ public class UpgradeUtil {
         Table sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
         try {
             logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
-            KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey, 
+            Cell saltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, 
                     PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
                     MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
@@ -667,7 +667,7 @@ public class UpgradeUtil {
                 // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401 
                 if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) {
                     byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber());
-                    KeyValue seqNumKV = KeyValueUtil.newKeyValue(seqTableKey, 
+                    Cell seqNumKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, 
                             PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                             PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES,
                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
@@ -761,7 +761,7 @@ public class UpgradeUtil {
                             if (!success) {
                                 if (!committed) { // Try to recover by setting salting back to off, as we haven't successfully committed anything
                                     // Don't use Delete here as we'd never be able to change it again at this timestamp.
-                                    KeyValue unsaltKV = KeyValueUtil.newKeyValue(seqTableKey, 
+                                    Cell unsaltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, 
                                             PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                                             PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
                                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
@@ -1675,7 +1675,7 @@ public class UpgradeUtil {
         if (!columnCells.isEmpty() && (timestamp = columnCells.get(0)
                 .getTimestamp()) < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
 
-            KeyValue upgradeKV = KeyValueUtil.newKeyValue(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            Cell upgradeKV = PhoenixKeyValueUtil.newKeyValue(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     UPGRADE_TO_4_7_COLUMN_NAME, timestamp, PBoolean.INSTANCE.toBytes(true));
             Put upgradePut = new Put(statsTableKey);
             upgradePut.add(upgradeKV);
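
A sketch of the write shape used for these one-cell metadata upgrades
(hypothetical method and argument names; Put.add accepts any Cell whose row
matches the Put's row):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.PhoenixKeyValueUtil;

class FlagUpgradeSketch {
    static Put flagPut(byte[] rowKey, byte[] family, byte[] qualifier, long ts)
            throws IOException {
        // Build the boolean flag cell at a fixed timestamp, then attach it.
        Cell flag = PhoenixKeyValueUtil.newKeyValue(rowKey, family, qualifier,
                ts, Bytes.toBytes(true));
        Put put = new Put(rowKey);
        put.add(flag);
        return put;
    }
}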

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
index 8bb491d..ddbd4a3 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
@@ -32,15 +32,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -130,7 +131,7 @@ public class ReadWriteKeyValuesWithCodecTest {
   private void addMutation(WALEdit edit, Mutation m, byte[] family) {
     List<Cell> kvs = m.getFamilyCellMap().get(FAMILY);
     for (Cell kv : kvs) {
-      edit.add(KeyValueUtil.ensureKeyValue(kv));
+      edit.add(PhoenixKeyValueUtil.maybeCopyCell(kv));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
index 8553b73..0374044 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java
@@ -27,6 +27,7 @@ import java.sql.DriverManager;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -95,19 +96,19 @@ public class MutationStateTest {
             conn.createStatement().execute("upsert into MUTATION_TEST2(id2,appId2) values(222,'app2')");
 
 
-            Iterator<Pair<byte[],List<KeyValue>>> dataTableNameAndMutationKeyValuesIter =
+            Iterator<Pair<byte[],List<Cell>>> dataTableNameAndMutationKeyValuesIter =
                     PhoenixRuntime.getUncommittedDataIterator(conn);
 
 
             assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext());
-            Pair<byte[],List<KeyValue>> pair=dataTableNameAndMutationKeyValuesIter.next();
+            Pair<byte[],List<Cell>> pair=dataTableNameAndMutationKeyValuesIter.next();
             String tableName1=Bytes.toString(pair.getFirst());
-            List<KeyValue> keyValues1=pair.getSecond();
+            List<Cell> keyValues1=pair.getSecond();
 
             assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext());
             pair=dataTableNameAndMutationKeyValuesIter.next();
             String tableName2=Bytes.toString(pair.getFirst());
-            List<KeyValue> keyValues2=pair.getSecond();
+            List<Cell> keyValues2=pair.getSecond();
 
             if("MUTATION_TEST1".equals(tableName1)) {
                 assertTable(tableName1, keyValues1, tableName2, keyValues2);
@@ -124,7 +125,7 @@ public class MutationStateTest {
         }
     }
 
-    private void assertTable(String tableName1,List<KeyValue> keyValues1,String tableName2,List<KeyValue> keyValues2) {
+    private void assertTable(String tableName1,List<Cell> keyValues1,String tableName2,List<Cell> keyValues2) {
         assertTrue("MUTATION_TEST1".equals(tableName1));
         assertTrue(Bytes.equals(PUnsignedInt.INSTANCE.toBytes(111),CellUtil.cloneRow(keyValues1.get(0))));
         assertTrue("app1".equals(PVarchar.INSTANCE.toObject(CellUtil.cloneValue(keyValues1.get(1)))));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index 195c2f0..5383d9b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -61,7 +61,7 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PIntegerArray;
 import org.apache.phoenix.schema.types.PVarcharArray;
 import org.apache.phoenix.schema.types.PhoenixArray;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
@@ -159,7 +159,7 @@ public class UnnestArrayPlanTest {
         for (Object[] array : arrays) {
             PhoenixArray pArray = new PhoenixArray(baseType, array);
             byte[] bytes = arrayType.toBytes(pArray);            
-            tuples.add(new SingleKeyValueTuple(KeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
+            tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
         }
         
         return tuples;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
index 6c28cdf..603b68e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
@@ -24,6 +24,7 @@ import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.KeyRange;
@@ -608,13 +609,13 @@ public class SkipScanFilterTest extends TestCase {
 
         @SuppressWarnings("deprecation")
         @Override public void examine(SkipScanFilter skipper) throws IOException {
-            KeyValue kv = KeyValue.createFirstOnRow(rowkey);
+            KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey);
             skipper.reset();
             assertFalse(skipper.filterAllRemaining());
             assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()));
 
             assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, skipper.filterKeyValue(kv));
-            assertEquals(KeyValue.createFirstOnRow(hint), skipper.getNextCellHint(kv));
+            assertEquals(KeyValueUtil.createFirstOnRow(hint), skipper.getNextCellHint(kv));
         }
 
         @Override public String toString() {
@@ -634,7 +635,7 @@ public class SkipScanFilterTest extends TestCase {
         
         @SuppressWarnings("deprecation")
         @Override public void examine(SkipScanFilter skipper) throws IOException {
-            KeyValue kv = KeyValue.createFirstOnRow(rowkey);
+            KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey);
             skipper.reset();
             assertFalse(skipper.filterAllRemaining());
             assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()));
@@ -657,7 +658,7 @@ public class SkipScanFilterTest extends TestCase {
         }
 
         @Override public void examine(SkipScanFilter skipper) throws IOException {
-            KeyValue kv = KeyValue.createFirstOnRow(rowkey);
+            KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey);
             skipper.reset();
             assertEquals(ReturnCode.NEXT_ROW,skipper.filterKeyValue(kv));
             skipper.reset();
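
The static factory moved off KeyValue in HBase 2.0; a minimal sketch of the
replacement call (hypothetical class name):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

class FirstOnRowSketch {
    public static void main(String[] args) {
        // Smallest possible KeyValue sorting first on the given row.
        KeyValue kv = KeyValueUtil.createFirstOnRow(Bytes.toBytes("rowkey"));
        System.out.println(kv);
    }
}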

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
index 5868103..39e9680 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
@@ -72,11 +73,11 @@ public class IndexTestingUtils {
     // s.setRaw(true);
     s.setMaxVersions();
     s.setTimeRange(start, end);
-    List<KeyValue> received = new ArrayList<KeyValue>();
+    List<Cell> received = new ArrayList<Cell>();
     ResultScanner scanner = index1.getScanner(s);
     for (Result r : scanner) {
-      received.addAll(r.list());
-      LOG.debug("Received: " + r.list());
+      received.addAll(r.listCells());
+      LOG.debug("Received: " + r.listCells());
     }
     scanner.close();
     assertEquals("Didn't get the expected kvs from the index table!", expected, received);
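
Result.list() is gone in HBase 2.0; listCells() is the Cell-typed replacement,
though it can return null for an empty Result, hence the guard below. A sketch
(hypothetical class name):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;

class CollectCells {
    static List<Cell> collect(Iterable<Result> results) {
        List<Cell> received = new ArrayList<Cell>();
        for (Result r : results) {
            if (r.listCells() != null) {
                received.addAll(r.listCells());
            }
        }
        return received;
    }
}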

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
index fc4734d..a83aeff 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
@@ -17,6 +17,7 @@ import java.util.Map.Entry;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -172,8 +173,11 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
             }
             // there is a next value - we only care about the current value, so we can just snag that
             Cell next = kvs.next();
-            if (ref.matchesFamily(next.getFamily()) && ref.matchesQualifier(next.getQualifier())) {
-                byte[] v = next.getValue();
+            if (ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(),
+                next.getFamilyLength())
+                    && ref.matchesQualifier(next.getQualifierArray(), next.getQualifierOffset(),
+                        next.getQualifierLength())) {
+                byte[] v = CellUtil.cloneValue(next);
                 totalValueLength += v.length;
                 entries.add(new ColumnEntry(v, ref));
             } else {
@@ -188,20 +192,20 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
             }
             // matches all columns, so we need to iterate until we hit the next column with the same
             // family as the current key
-            byte[] lastQual = next.getQualifier();
+            byte[] lastQual = CellUtil.cloneQualifier(next);
             byte[] nextQual = null;
             while ((next = kvs.next()) != null) {
                 // different family, done with this column
-                if (!ref.matchesFamily(next.getFamily())) {
+                if (!ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(), next.getFamilyLength())) {
                     break;
                 }
-                nextQual = next.getQualifier();
+                nextQual = CellUtil.cloneQualifier(next);
                 // we are still on the same qualifier - skip it, since we already added a column for it
                 if (Arrays.equals(lastQual, nextQual)) {
                     continue;
                 }
                // this must match the qualifier since it's an all-qualifiers specifier, so we add it
-                byte[] v = next.getValue();
+                byte[] v = CellUtil.cloneValue(next);
                 totalValueLength += v.length;
                 entries.add(new ColumnEntry(v, ref));
                 // update the last qualifier to check against
@@ -285,7 +289,7 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
      *            expected value--column pair
     * @return the keyvalues that the index contains for a given row at a timestamp with the given value -- column pairs.
      */
-    public static List<KeyValue> getIndexKeyValueForTesting(byte[] pk, long timestamp,
+    public static List<Cell> getIndexKeyValueForTesting(byte[] pk, long timestamp,
             List<Pair<byte[], CoveredColumn>> values) {
 
         int length = 0;
@@ -299,8 +303,8 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
         byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected);
         Put p = new Put(rowKey, timestamp);
         CoveredColumnIndexCodec.addColumnsToPut(p, expected);
-        List<KeyValue> kvs = new ArrayList<KeyValue>();
-        for (Entry<byte[], List<KeyValue>> entry : p.getFamilyMap().entrySet()) {
+        List<Cell> kvs = new ArrayList<Cell>();
+        for (Entry<byte[], List<Cell>> entry : p.getFamilyCellMap().entrySet()) {
             kvs.addAll(entry.getValue());
         }
 
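The Cell interface drops the copying getters (getFamily(), getQualifier(), getValue()). The patch uses two replacement idioms: compare in place via the array/offset/length triplet, and copy explicitly with CellUtil.cloneValue()/cloneQualifier() only where a standalone byte[] is required. A minimal sketch of both idioms (class name below is illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellAccessExample {
        public static void main(String[] args) {
            Cell cell = new KeyValue(Bytes.toBytes("pk"), Bytes.toBytes("fam"),
                    Bytes.toBytes("qual"), 1L, Bytes.toBytes("v1"));
            byte[] family = Bytes.toBytes("fam");
            // Zero-copy comparison against the cell's backing array.
            boolean sameFamily = Bytes.equals(family, 0, family.length,
                    cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
            // Explicit copy, only where a standalone byte[] is genuinely needed.
            byte[] value = CellUtil.cloneValue(cell);
            System.out.println(sameFamily + " " + Bytes.toString(value));
        }
    }
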

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
index b3b143e..cc74dda 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.hbase.index.covered.data.LocalTable;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.scanner.Scanner;
 import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -99,7 +100,7 @@ public class LocalTableStateTest {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     //add the kvs from the mutation
-    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
+    table.addPendingUpdates(m.get(fam, qual));
 
     // setup the lookup
     ColumnReference col = new ColumnReference(fam, qual);
@@ -145,7 +146,7 @@ public class LocalTableStateTest {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     //add the kvs from the mutation
-    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
+    table.addPendingUpdates(m.get(fam, qual));
 
     // setup the lookup
     ColumnReference col = new ColumnReference(fam, qual);
@@ -182,7 +183,7 @@ public class LocalTableStateTest {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     //add the kvs from the mutation
-    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
+    table.addPendingUpdates(m.get(fam, qual));
 
     // setup the lookup
     ColumnReference col = new ColumnReference(fam, qual);
@@ -224,7 +225,7 @@ public class LocalTableStateTest {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     // add the kvs from the mutation
-    KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
+    KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(m.get(fam, qual).get(0));
     kv.setSequenceId(0);
     table.addPendingUpdates(kv);
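
Mutation.get(family, qualifier) already returns List<Cell> under HBase 2.0, so the old KeyValueUtil.ensureKeyValues(...) conversion is redundant and the hunks above simply drop it. A minimal sketch (class name below is illustrative):

    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PendingUpdatesExample {
        public static void main(String[] args) {
            byte[] fam = Bytes.toBytes("family");
            byte[] qual = Bytes.toBytes("qual");
            Put m = new Put(Bytes.toBytes("row"));
            m.addColumn(fam, qual, Bytes.toBytes("v"));
            // Mutation.get() already hands back List<Cell>, so no
            // KeyValueUtil.ensureKeyValues(...) conversion step is needed.
            List<Cell> pending = m.get(fam, qual);
            System.out.println(pending.size());
        }
    }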
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
index 7dd46d6..8c0a693 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
@@ -165,7 +165,7 @@ public class TestCoveredColumnIndexCodec {
     // start with a basic put that has some keyvalues
     Put p = new Put(PK);
     // setup the kvs to add
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
+    List<Cell> kvs = new ArrayList<Cell>();
     byte[] v1 = Bytes.toBytes("v1");
     KeyValue kv = new KeyValue(PK, FAMILY, QUAL, 1, v1);
     kvs.add(kv);
@@ -203,7 +203,7 @@ public class TestCoveredColumnIndexCodec {
     d.addFamily(FAMILY, 2);
     // setup the next batch of 'current state', basically just ripping out the current state from
     // the last round
-    table = new SimpleTableState(new Result(kvs));
+    table = new SimpleTableState(Result.create(kvs));
     state = new LocalTableState(env, table, d);
     state.setCurrentTimestamp(2);
     // check the cleanup of the current table, after the puts (mocking a 'next' update)
@@ -230,13 +230,13 @@ public class TestCoveredColumnIndexCodec {
     ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
   }
 
-  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<KeyValue> currentState,
+  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<Cell> currentState,
       Delete d) throws IOException {
-    LocalHBaseState table = new SimpleTableState(new Result(currentState));
+    LocalHBaseState table = new SimpleTableState(Result.create(currentState));
     LocalTableState state = new LocalTableState(env, table, d);
     state.setCurrentTimestamp(d.getTimeStamp());
     // now we shouldn't see anything when getting the index update
-    state.addPendingUpdates(d.getFamilyMap().get(FAMILY));
+    state.addPendingUpdates(d.getFamilyCellMap().get(FAMILY));
     Iterable<IndexUpdate> updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA);
     for (IndexUpdate update : updates) {
       assertFalse("Had some index updates, though it should have been covered by the delete",
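
Two replacements recur in this file: the removed Result(List<KeyValue>) constructor gives way to the static factory Result.create(List<Cell>), and Mutation.getFamilyMap() gives way to getFamilyCellMap(), a family-keyed map of List<Cell>. A minimal sketch of both (class name below is illustrative):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultCreateExample {
        public static void main(String[] args) {
            byte[] pk = Bytes.toBytes("pk");
            byte[] family = Bytes.toBytes("FAMILY");
            List<Cell> kvs = new ArrayList<Cell>();
            kvs.add(new KeyValue(pk, family, Bytes.toBytes("qual"), 1L, Bytes.toBytes("v1")));
            // new Result(List<KeyValue>) is gone; use the static factory over cells.
            Result r = Result.create(kvs);
            // getFamilyMap() (List<KeyValue>) is replaced by getFamilyCellMap() (List<Cell>).
            Delete d = new Delete(pk, 2L);
            d.addFamily(family, 2L);
            List<Cell> familyCells = d.getFamilyCellMap().get(family);
            System.out.println(r.size() + " " + familyCells.size());
        }
    }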

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
index 400757d..bcd5666 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
@@ -20,8 +20,10 @@ package org.apache.phoenix.hbase.index.covered.data;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.scanner.ReseekableScanner;
 import org.junit.Test;
@@ -36,7 +38,7 @@ public class TestIndexMemStore {
 
   @Test
   public void testCorrectOverwritting() throws Exception {
-    IndexMemStore store = new IndexMemStore(IndexMemStore.COMPARATOR);
+    IndexMemStore store = new IndexMemStore(CellComparatorImpl.COMPARATOR);
     long ts = 10;
     KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
     kv.setSequenceId(2);
@@ -46,7 +48,7 @@ public class TestIndexMemStore {
     // adding the exact same kv shouldn't change anything stored if not overwriting
     store.add(kv2, false);
     ReseekableScanner scanner = store.getScanner();
-    KeyValue first = KeyValue.createFirstOnRow(row);
+    KeyValue first = KeyValueUtil.createFirstOnRow(row);
     scanner.seek(first);
     assertTrue("Overwrote kv when specifically not!", kv == scanner.next());
     scanner.close();
@@ -80,7 +82,7 @@ public class TestIndexMemStore {
 
     // null qualifiers should always sort before the non-null cases
     ReseekableScanner scanner = store.getScanner();
-    KeyValue first = KeyValue.createFirstOnRow(row);
+    KeyValue first = KeyValueUtil.createFirstOnRow(row);
     assertTrue("Didn't have any data in the scanner", scanner.seek(first));
     assertTrue("Didn't get delete family first (no qualifier == sort first)", df == scanner.next());
     assertTrue("Didn't get point delete before corresponding put", d == scanner.next());
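
This hunk swaps the store's custom IndexMemStore.COMPARATOR for HBase 2.0's stock CellComparatorImpl.COMPARATOR, and moves createFirstOnRow onto KeyValueUtil as elsewhere. A minimal sketch of the ordering property the final assertions rely on, namely that an empty (null) qualifier sorts ahead of a non-empty one for the same row and family (class name below is illustrative):

    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValue.Type;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ComparatorExample {
        public static void main(String[] args) {
            byte[] row = Bytes.toBytes("row");
            byte[] family = Bytes.toBytes("fam");
            byte[] val = Bytes.toBytes("val");
            KeyValue noQual = new KeyValue(row, family, null, 10L, Type.Put, val);
            KeyValue withQual = new KeyValue(row, family, Bytes.toBytes("qual"), 10L, Type.Put, val);
            // CellComparatorImpl.COMPARATOR replaces the removed KeyValue.KVComparator;
            // the empty qualifier compares lower for the same row, family, and timestamp.
            int cmp = CellComparatorImpl.COMPARATOR.compare(noQual, withQual);
            System.out.println(cmp < 0); // expected: true
        }
    }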