Posted to commits@phoenix.apache.org by je...@apache.org on 2014/02/08 08:42:51 UTC

[02/11] Port Phoenix to HBase 0.98

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7b07e7c..4f4998f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -58,7 +58,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
+import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -404,7 +404,7 @@ public class QueryServicesOptions {
     }
 
     public QueryServicesOptions setWALEditCodec(String walEditCodec) {
-        return set(WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY, walEditCodec);
+        return set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, walEditCodec);
     }
 
 }

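The hunk above tracks the 0.94-to-0.98 rename of the WAL codec configuration key. Below is a minimal sketch of the renamed key in use against a plain HBase 0.98 Configuration; the codec class value set here is illustrative, not a Phoenix default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

    public class WalCodecKeyDemo {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 0.98 keys the WAL codec off WALCellCodec ("hbase.regionserver.wal.codec");
            // 0.94 used the now-removed WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY.
            conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY,
                    "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec"); // illustrative value
            System.out.println(conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY));
        }
    }
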
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
index d4577c3..f623c28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateColumn.java
@@ -50,16 +50,6 @@ public class DelegateColumn extends DelegateDatum implements PColumn {
     }
 
     @Override
-    public void readFields(DataInput input) throws IOException {
-        getDelegate().readFields(input);
-    }
-
-    @Override
-    public void write(DataOutput output) throws IOException {
-        getDelegate().write(output);
-    }
-
-    @Override
     public int getPosition() {
         return getDelegate().getPosition();
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 46e0f78..45745d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1097,12 +1097,12 @@ public class MetaDataClient {
             long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
             List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
             @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
-            Delete tableDelete = new Delete(key, clientTimeStamp, null);
+            Delete tableDelete = new Delete(key, clientTimeStamp);
             tableMetaData.add(tableDelete);
             if (parentTableName != null) {
                 byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantId, schemaName, parentTableName, tableName);
                 @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
-                Delete linkDelete = new Delete(linkKey, clientTimeStamp, null);
+                Delete linkDelete = new Delete(linkKey, clientTimeStamp);
                 tableMetaData.add(linkDelete);
             }
 

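In 0.98 the Delete(byte[], long, RowLock) overload used against 0.94 is gone, so the two-argument constructor carries the client timestamp on its own. A self-contained sketch of the changed call (the row key is illustrative):

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteTimestampDemo {
        public static void main(String[] args) {
            byte[] key = Bytes.toBytes("row-key"); // illustrative row key
            long clientTimeStamp = System.currentTimeMillis();
            // 0.94: new Delete(key, clientTimeStamp, null); the RowLock arg no longer exists
            Delete tableDelete = new Delete(key, clientTimeStamp);
            System.out.println(tableDelete.getTimeStamp());
        }
    }
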
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
index 7f58495..cfc81be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
@@ -19,15 +19,13 @@
  */
 package org.apache.phoenix.schema;
 
-import org.apache.hadoop.io.Writable;
-
 /**
  * Definition of a Phoenix column
  *
  * 
  * @since 0.1
  */
-public interface PColumn extends PDatum, Writable {
+public interface PColumn extends PDatum {
 
     /**
      * @return the name of the column qualifier

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index 03e78f8..a16c188 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -19,22 +19,12 @@
  */
 package org.apache.phoenix.schema;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.WritableUtils;
+import com.google.protobuf.HBaseZeroCopyByteString;
+import org.apache.phoenix.coprocessor.generated.PTableProtos;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.util.ByteUtil;
-
 import com.google.common.base.Preconditions;
 
-
 public class PColumnImpl implements PColumn {
-    private static final Integer NO_MAXLENGTH = Integer.MIN_VALUE;
-    private static final Integer NO_SCALE = Integer.MIN_VALUE;
-
     private PName name;
     private PName familyName;
     private PDataType dataType;
@@ -144,37 +134,6 @@ public class PColumnImpl implements PColumn {
     public String toString() {
         return (familyName == null ? "" : familyName.toString() + QueryConstants.NAME_SEPARATOR) + name.toString();
     }
-
-    @Override
-    public void readFields(DataInput input) throws IOException {
-        byte[] columnNameBytes = Bytes.readByteArray(input);
-        PName columnName = PNameFactory.newName(columnNameBytes);
-        byte[] familyNameBytes = Bytes.readByteArray(input);
-        PName familyName = familyNameBytes.length == 0 ? null : PNameFactory.newName(familyNameBytes);
-        // TODO: optimize the reading/writing of this b/c it could likely all fit in a single byte or two
-        PDataType dataType = PDataType.values()[WritableUtils.readVInt(input)];
-        int maxLength = WritableUtils.readVInt(input);
-        int scale = WritableUtils.readVInt(input);
-        boolean nullable = input.readBoolean();
-        int position = WritableUtils.readVInt(input);
-        ColumnModifier columnModifier = ColumnModifier.fromSystemValue(WritableUtils.readVInt(input));
-        int arrSize = WritableUtils.readVInt(input);
-        init(columnName, familyName, dataType, maxLength == NO_MAXLENGTH ? null : maxLength,
-                scale == NO_SCALE ? null : scale, nullable, position, columnModifier, arrSize == -1 ? null : arrSize);
-    }
-
-    @Override
-    public void write(DataOutput output) throws IOException {
-        Bytes.writeByteArray(output, name.getBytes());
-        Bytes.writeByteArray(output, familyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : familyName.getBytes());
-        WritableUtils.writeVInt(output, dataType.ordinal());
-        WritableUtils.writeVInt(output, maxLength == null ? NO_MAXLENGTH : maxLength);
-        WritableUtils.writeVInt(output, scale == null ? NO_SCALE : scale);
-        output.writeBoolean(nullable);
-        WritableUtils.writeVInt(output, position);
-        WritableUtils.writeVInt(output, ColumnModifier.toSystemValue(columnModifier));
-        WritableUtils.writeVInt(output, arraySize == null ? -1 : arraySize);
-    }
     
     @Override
     public int hashCode() {
@@ -204,4 +163,57 @@ public class PColumnImpl implements PColumn {
     public Integer getArraySize() {
         return arraySize;
     }
+
+  /**
+   * Create a PColumn instance from a protobuf-serialized PColumn instance
+   * @param column
+   */
+  public static PColumn createFromProto(PTableProtos.PColumn column) {
+    byte[] columnNameBytes = column.getColumnNameBytes().toByteArray();
+    PName columnName = PNameFactory.newName(columnNameBytes);
+    PName familyName = null;
+    if (column.hasFamilyNameBytes()) {
+      familyName = PNameFactory.newName(column.getFamilyNameBytes().toByteArray());
+    }
+    PDataType dataType = PDataType.fromSqlTypeName(column.getDataType());
+    Integer maxLength = null;
+    if (column.hasMaxLength()) {
+      maxLength = column.getMaxLength();
+    }
+    Integer scale = null;
+    if (column.hasScale()) {
+      scale = column.getScale();
+    }
+    boolean nullable = column.getNullable();
+    int position = column.getPosition();
+    ColumnModifier columnModifier = ColumnModifier.fromSystemValue(column.getColumnModifier());
+    Integer arraySize = null;
+    if(column.hasArraySize()){
+      arraySize = column.getArraySize();
+    }
+    return new PColumnImpl(columnName, familyName, dataType, maxLength, scale, nullable, position,
+        columnModifier, arraySize);
+  }
+
+  public static PTableProtos.PColumn toProto(PColumn column) {
+    PTableProtos.PColumn.Builder builder = PTableProtos.PColumn.newBuilder();
+    builder.setColumnNameBytes(HBaseZeroCopyByteString.wrap(column.getName().getBytes()));
+    if (column.getFamilyName() != null) {
+      builder.setFamilyNameBytes(HBaseZeroCopyByteString.wrap(column.getFamilyName().getBytes()));
+    }
+    builder.setDataType(column.getDataType().getSqlTypeName());
+    if (column.getMaxLength() != null) {
+      builder.setMaxLength(column.getMaxLength());
+    }
+    if (column.getScale() != null) {
+      builder.setScale(column.getScale());
+    }
+    builder.setNullable(column.isNullable());
+    builder.setPosition(column.getPosition());
+    builder.setColumnModifier(ColumnModifier.toSystemValue(column.getColumnModifier()));
+    if(column.getArraySize() != null){
+      builder.setArraySize(column.getArraySize());
+    }
+    return builder.build();
+  }
 }
\ No newline at end of file

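The Writable readFields/write pair is replaced above by protobuf conversion. A sketch of the round trip through the new methods, assuming PColumnImpl's nine-argument constructor is accessible as shown in createFromProto; the column definition itself is illustrative:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.coprocessor.generated.PTableProtos;
    import org.apache.phoenix.schema.PColumn;
    import org.apache.phoenix.schema.PColumnImpl;
    import org.apache.phoenix.schema.PDataType;
    import org.apache.phoenix.schema.PNameFactory;

    public class PColumnProtoRoundTrip {
        public static void main(String[] args) {
            // Illustrative column: nullable VARCHAR "NAME" in family "CF" at position 1.
            PColumn original = new PColumnImpl(
                    PNameFactory.newName(Bytes.toBytes("NAME")),
                    PNameFactory.newName(Bytes.toBytes("CF")),
                    PDataType.VARCHAR, null /* maxLength */, null /* scale */,
                    true /* nullable */, 1 /* position */,
                    null /* columnModifier */, null /* arraySize */);
            PTableProtos.PColumn proto = PColumnImpl.toProto(original);
            PColumn copy = PColumnImpl.createFromProto(proto);
            System.out.println(copy + " should print the same as " + original);
        }
    }
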
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index eeffc4c..3e0e1cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -35,7 +35,7 @@ import org.apache.phoenix.schema.stat.PTableStats;
  * 
  * @since 0.1
  */
-public interface PTable extends Writable {
+public interface PTable {
     public static final long INITIAL_SEQ_NUM = 0;
     public static final String IS_IMMUTABLE_ROWS_PROP_NAME = "IMMUTABLE_ROWS";
     public static final boolean DEFAULT_DISABLE_WAL = false;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index f2a3e76..3827c6b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -24,8 +24,6 @@ import static org.apache.phoenix.client.KeyValueBuilder.deleteQuietly;
 import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE;
 import static org.apache.phoenix.schema.SaltingUtil.SALTING_COLUMN;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -34,18 +32,21 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.client.KeyValueBuilder;
+import org.apache.phoenix.coprocessor.generated.PTableProtos;
 import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.protobuf.ProtobufUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.stat.PTableStats;
@@ -63,7 +64,7 @@ import com.google.common.collect.ImmutableSortedMap;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-
+import com.google.protobuf.HBaseZeroCopyByteString;
 
 /**
  * 
@@ -543,13 +544,14 @@ public class PTableImpl implements PTable {
         }
 
         private void removeIfPresent(Mutation m, byte[] family, byte[] qualifier) {
-            Map<byte[],List<KeyValue>> familyMap = m.getFamilyMap();
-            List<KeyValue> kvs = familyMap.get(family);
+            Map<byte[],List<Cell>> familyMap = m.getFamilyCellMap();
+            List<Cell> kvs = familyMap.get(family);
             if (kvs != null) {
-                Iterator<KeyValue> iterator = kvs.iterator();
+                Iterator<Cell> iterator = kvs.iterator();
                 while (iterator.hasNext()) {
-                    KeyValue kv = iterator.next();
-                    if (Bytes.compareTo(kv.getQualifier(), qualifier) == 0) {
+                    Cell kv = iterator.next();
+                    if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), 
+                          qualifier, 0, qualifier.length) == 0) {
                         iterator.remove();
                     }
                 }
@@ -591,16 +593,18 @@ public class PTableImpl implements PTable {
             }
         }
         
-        @SuppressWarnings("deprecation")
         @Override
         public void delete() {
             newMutations();
             // FIXME: the version of the Delete constructor without the lock args was introduced
             // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
             // of the client.
-            Delete delete = new Delete(key,ts,null);
+            Delete delete = new Delete(key,ts);
             deleteRow = delete;
-            deleteRow.setWriteToWAL(!isWALDisabled());
+            // No need to write to the WAL for indexes
+            if (PTableImpl.this.getType() == PTableType.INDEX) {
+                deleteRow.setDurability(Durability.SKIP_WAL);
+            }
         }
     }
 
@@ -644,126 +648,6 @@ public class PTableImpl implements PTable {
     }
 
     @Override
-    public void readFields(DataInput input) throws IOException {
-        byte[] schemaNameBytes = Bytes.readByteArray(input);
-        byte[] tableNameBytes = Bytes.readByteArray(input);
-        PName schemaName = PNameFactory.newName(schemaNameBytes);
-        PName tableName = PNameFactory.newName(tableNameBytes);
-        PTableType tableType = PTableType.values()[WritableUtils.readVInt(input)];
-        PIndexState indexState = null;
-        if (tableType == PTableType.INDEX) {
-            int ordinal = WritableUtils.readVInt(input);
-            if (ordinal >= 0) {
-                indexState = PIndexState.values()[ordinal];
-            }
-        }
-        long sequenceNumber = WritableUtils.readVLong(input);
-        long timeStamp = input.readLong();
-        byte[] pkNameBytes = Bytes.readByteArray(input);
-        PName pkName = pkNameBytes.length == 0 ? null : PNameFactory.newName(pkNameBytes);
-        Integer bucketNum = WritableUtils.readVInt(input);
-        int nColumns = WritableUtils.readVInt(input);
-        List<PColumn> columns = Lists.newArrayListWithExpectedSize(nColumns);
-        for (int i = 0; i < nColumns; i++) {
-            PColumn column = new PColumnImpl();
-            column.readFields(input);
-            columns.add(column);
-        }
-        int nIndexes = WritableUtils.readVInt(input);
-        List<PTable> indexes = Lists.newArrayListWithExpectedSize(nIndexes);
-        for (int i = 0; i < nIndexes; i++) {
-            PTable index = new PTableImpl();
-            index.readFields(input);
-            indexes.add(index);
-        }
-        boolean isImmutableRows = input.readBoolean();
-        Map<String, byte[][]> guidePosts = new HashMap<String, byte[][]>();
-        int size = WritableUtils.readVInt(input);
-        for (int i=0; i<size; i++) {
-            String key = WritableUtils.readString(input);
-            int valueSize = WritableUtils.readVInt(input);
-            byte[][] value = new byte[valueSize][];
-            for (int j=0; j<valueSize; j++) {
-                value[j] = Bytes.readByteArray(input);
-            }
-            guidePosts.put(key, value);
-        }
-        byte[] dataTableNameBytes = Bytes.readByteArray(input);
-        PName dataTableName = dataTableNameBytes.length == 0 ? null : PNameFactory.newName(dataTableNameBytes);
-        byte[] defaultFamilyNameBytes = Bytes.readByteArray(input);
-        PName defaultFamilyName = defaultFamilyNameBytes.length == 0 ? null : PNameFactory.newName(defaultFamilyNameBytes);
-        boolean disableWAL = input.readBoolean();
-        boolean multiTenant = input.readBoolean();
-        ViewType viewType = null;
-        String viewStatement = null;
-        List<PName> physicalNames = Collections.emptyList();
-        if (tableType == PTableType.VIEW) {
-            viewType = ViewType.fromSerializedValue(input.readByte());
-            byte[] viewStatementBytes = Bytes.readByteArray(input);
-            viewStatement = viewStatementBytes.length == 0 ? null : (String)PDataType.VARCHAR.toObject(viewStatementBytes);
-            int nPhysicalNames = WritableUtils.readVInt(input);
-            physicalNames = Lists.newArrayListWithExpectedSize(nPhysicalNames);
-            for (int i = 0; i < nPhysicalNames; i++) {
-                byte[] physicalNameBytes = Bytes.readByteArray(input);
-                physicalNames.add(PNameFactory.newName(physicalNameBytes));
-            }
-        }
-        PTableStats stats = new PTableStatsImpl(guidePosts);
-        try {
-            init(schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName,
-                 bucketNum.equals(NO_SALTING) ? null : bucketNum, columns, stats, dataTableName,
-                 indexes, isImmutableRows, physicalNames, defaultFamilyName,
-                 viewStatement, disableWAL, multiTenant, viewType);
-        } catch (SQLException e) {
-            throw new RuntimeException(e); // Impossible
-        }
-    }
-
-    @Override
-    public void write(DataOutput output) throws IOException {
-        Bytes.writeByteArray(output, schemaName.getBytes());
-        Bytes.writeByteArray(output, tableName.getBytes());
-        WritableUtils.writeVInt(output, type.ordinal());
-        if (type == PTableType.INDEX) {
-            WritableUtils.writeVInt(output, state == null ? -1 : state.ordinal());
-        }
-        WritableUtils.writeVLong(output, sequenceNumber);
-        output.writeLong(timeStamp);
-        Bytes.writeByteArray(output, pkName == null ? ByteUtil.EMPTY_BYTE_ARRAY : pkName.getBytes());
-        int offset = 0, nColumns = allColumns.size();
-        if (bucketNum == null) {
-            WritableUtils.writeVInt(output, NO_SALTING);
-        } else {
-            offset = 1;
-            nColumns--;
-            WritableUtils.writeVInt(output, bucketNum);
-        }
-        WritableUtils.writeVInt(output, nColumns);
-        for (int i = offset; i < allColumns.size(); i++) {
-            PColumn column = allColumns.get(i);
-            column.write(output);
-        }
-        WritableUtils.writeVInt(output, indexes.size());
-        for (PTable index: indexes) {
-            index.write(output);
-        }
-        output.writeBoolean(isImmutableRows);
-        stats.write(output);
-        Bytes.writeByteArray(output, parentTableName == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentTableName.getBytes());
-        Bytes.writeByteArray(output, defaultFamilyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : defaultFamilyName.getBytes());
-        output.writeBoolean(disableWAL);
-        output.writeBoolean(multiTenant);
-        if (type == PTableType.VIEW) {
-            output.writeByte(viewType.getSerializedValue());
-            Bytes.writeByteArray(output, viewStatement == null ? ByteUtil.EMPTY_BYTE_ARRAY : PDataType.VARCHAR.toBytes(viewStatement));
-            WritableUtils.writeVInt(output, physicalNames.size());
-            for (int i = 0; i < physicalNames.size(); i++) {
-                Bytes.writeByteArray(output, physicalNames.get(i).getBytes());
-            }
-        }
-    }
-
-    @Override
     public PColumn getPKColumn(String name) throws ColumnNotFoundException {
         List<PColumn> columns = columnsByName.get(name);
         int size = columns.size();
@@ -862,4 +746,140 @@ public class PTableImpl implements PTable {
     public boolean isWALDisabled() {
         return disableWAL;
     }
+    
+    /**
+     * Construct a PTable instance from a protobuf-serialized PTable instance
+     * @param table
+     */
+    public static PTable createFromProto(PTableProtos.PTable table) {
+      PName schemaName = PNameFactory.newName(table.getSchemaNameBytes().toByteArray());
+      PName tableName = PNameFactory.newName(table.getTableNameBytes().toByteArray());
+      PTableType tableType = PTableType.values()[table.getTableType().ordinal()];
+      PIndexState indexState = null;
+      if (table.hasIndexState()) {
+        indexState = PIndexState.fromSerializedValue(table.getIndexState());
+      }
+      long sequenceNumber = table.getSequenceNumber();
+      long timeStamp = table.getTimeStamp();
+      PName pkName = null;
+      if (table.hasPkNameBytes()) {
+        pkName = PNameFactory.newName(table.getPkNameBytes().toByteArray());
+      }
+      int bucketNum = table.getBucketNum();
+      List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount());
+      for (PTableProtos.PColumn curPColumnProto : table.getColumnsList()) {
+        columns.add(PColumnImpl.createFromProto(curPColumnProto));
+      }
+      List<PTable> indexes = Lists.newArrayListWithExpectedSize(table.getIndexesCount());
+      for (PTableProtos.PTable curPTableProto : table.getIndexesList()) {
+        indexes.add(createFromProto(curPTableProto));
+      }
+      boolean isImmutableRows = table.getIsImmutableRows();
+      Map<String, byte[][]> guidePosts = new HashMap<String, byte[][]>();
+      for (PTableProtos.PTableStats pTableStatsProto : table.getGuidePostsList()) {
+        byte[][] value = new byte[pTableStatsProto.getValuesCount()][];
+        for (int j = 0; j < pTableStatsProto.getValuesCount(); j++) {
+          value[j] = pTableStatsProto.getValues(j).toByteArray();
+        }
+        guidePosts.put(pTableStatsProto.getKey(), value);
+      }
+      PName dataTableName = null;
+      if (table.hasDataTableNameBytes()) {
+        dataTableName = PNameFactory.newName(table.getDataTableNameBytes().toByteArray());
+      }
+      PName defaultFamilyName = null;
+      if (table.hasDefaultFamilyName()) {
+        defaultFamilyName = PNameFactory.newName(table.getDefaultFamilyName().toByteArray());
+      }
+      boolean disableWAL = table.getDisableWAL();
+      boolean multiTenant = table.getMultiTenant();
+      ViewType viewType = null;
+      String viewStatement = null;
+      List<PName> physicalNames = Collections.emptyList();
+      if (tableType == PTableType.VIEW) {
+          viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]);
+          if(table.hasViewStatement()){
+            viewStatement = (String)PDataType.VARCHAR.toObject(table.getViewStatement().toByteArray());
+            physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount());
+            for(int i = 0; i < table.getPhysicalNamesCount(); i++){
+              physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray()));
+            }
+          }
+      }      
+      PTableStats stats = new PTableStatsImpl(guidePosts);
+      try {
+        PTableImpl result = new PTableImpl();
+        result.init(schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName,
+          (bucketNum == NO_SALTING) ? null : bucketNum, columns, stats, dataTableName,indexes, isImmutableRows, 
+              physicalNames, defaultFamilyName, viewStatement, disableWAL, multiTenant, viewType);
+        return result;
+      } catch (SQLException e) {
+        throw new RuntimeException(e); // Impossible
+      }
+    }
+
+    public static PTableProtos.PTable toProto(PTable table) {
+      PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder();
+      builder.setSchemaNameBytes(HBaseZeroCopyByteString.wrap(table.getSchemaName().getBytes()));
+      builder.setTableNameBytes(HBaseZeroCopyByteString.wrap(table.getTableName().getBytes()));
+      builder.setTableType(ProtobufUtil.toPTableTypeProto(table.getType()));
+      if (table.getType() == PTableType.INDEX && table.getIndexState() != null) {
+        builder.setIndexState(table.getIndexState().getSerializedValue());
+      }
+      builder.setSequenceNumber(table.getSequenceNumber());
+      builder.setTimeStamp(table.getTimeStamp());
+      PName tmp = table.getPKName();
+      if (tmp != null) {
+        builder.setPkNameBytes(HBaseZeroCopyByteString.wrap(tmp.getBytes()));
+      }
+      Integer bucketNum = table.getBucketNum();
+      int offset = 0;
+      if(bucketNum == null){
+        builder.setBucketNum(NO_SALTING);
+      } else {
+        offset = 1;
+        builder.setBucketNum(bucketNum);
+      }
+      List<PColumn> columns = table.getColumns();
+      int columnSize = columns.size();
+      for (int i = offset; i < columnSize; i++) {
+          PColumn column = columns.get(i);
+          builder.addColumns(PColumnImpl.toProto(column));
+      }
+      List<PTable> indexes = table.getIndexes();
+      for (PTable curIndex : indexes) {
+        builder.addIndexes(toProto(curIndex));
+      }
+      builder.setIsImmutableRows(table.isImmutableRows());
+
+      // build stats
+      Map<String, byte[][]> statsMap = table.getTableStats().getGuidePosts();
+      if(statsMap != null) {
+        for (Entry<String, byte[][]> entry : statsMap.entrySet()) {
+          PTableProtos.PTableStats.Builder statsBuilder = PTableProtos.PTableStats.newBuilder();
+          statsBuilder.setKey(entry.getKey());
+          for (byte[] curVal : entry.getValue()) {
+            statsBuilder.addValues(HBaseZeroCopyByteString.wrap(curVal));
+          }
+          builder.addGuidePosts(statsBuilder.build());
+        }
+      }
+      if (table.getParentName() != null) {
+        builder.setDataTableNameBytes(HBaseZeroCopyByteString.wrap(table.getParentTableName().getBytes()));
+      }
+      if (table.getDefaultFamilyName()!= null) {
+        builder.setDefaultFamilyName(HBaseZeroCopyByteString.wrap(table.getDefaultFamilyName().getBytes()));
+      }
+      builder.setDisableWAL(table.isWALDisabled());
+      builder.setMultiTenant(table.isMultiTenant());
+      if(table.getType() == PTableType.VIEW){
+        builder.setViewType(HBaseZeroCopyByteString.wrap(new byte[]{table.getViewType().getSerializedValue()}));
+        builder.setViewStatement(HBaseZeroCopyByteString.wrap(PDataType.VARCHAR.toBytes(table.getViewStatement())));
+        for (int i = 0; i < table.getPhysicalNames().size(); i++) {
+          builder.addPhysicalNames(HBaseZeroCopyByteString.wrap(table.getPhysicalNames().get(i).getBytes()));
+        }
+      }
+
+      return builder.build();
+    }
 }
\ No newline at end of file

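Two 0.98 idioms recur in the PTableImpl hunk: getFamilyCellMap()/Cell accessors replace getFamilyMap()/KeyValue.getBuffer(), and per-mutation Durability replaces setWriteToWAL(). A sketch of both, using only stock HBase 0.98 client classes:

    import java.util.Iterator;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellMapDemo {
        public static void main(String[] args) {
            Put put = new Put(Bytes.toBytes("row"));
            put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v"));
            byte[] qualifier = Bytes.toBytes("q1");
            List<Cell> cells = put.getFamilyCellMap().get(Bytes.toBytes("cf"));
            Iterator<Cell> it = cells.iterator();
            while (it.hasNext()) {
                Cell kv = it.next();
                // Cell exposes (array, offset, length) triplets instead of getQualifier()
                if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(),
                        kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                    it.remove();
                }
            }
            // setWriteToWAL(false) is gone; durability is requested per mutation
            Delete delete = new Delete(Bytes.toBytes("row"));
            delete.setDurability(Durability.SKIP_WAL);
            System.out.println(put.isEmpty() + " " + delete.getDurability());
        }
    }
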
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
index f88475d..9f95a30 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/Sequence.java
@@ -13,14 +13,13 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.collect.Lists;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.SequenceRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -31,6 +30,8 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import com.google.common.collect.Lists;
+
 public class Sequence {
     public static final int SUCCESS = 0;
     
@@ -143,9 +144,9 @@ public class Sequence {
         byte[] opBuf = new byte[] {(byte)SequenceRegionObserver.Op.RETURN_SEQUENCE.ordinal()};
         append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, opBuf);
         append.setAttribute(SequenceRegionObserver.CURRENT_VALUE_ATTRIB, PDataType.LONG.toBytes(value.nextValue));
-        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
-        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PDataType.LONG.toBytes(value.currentValue))
+        Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
+        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PDataType.LONG.toBytes(value.currentValue))
                 ));
         return append;
     }
@@ -172,9 +173,9 @@ public class Sequence {
         // from the cache that we shouldn't have which will cause a gap in sequence values.
         // In that case, we might get an error that a curr value was done on a sequence
         // before a next val was. Not sure how to prevent that.
-        if (result.raw().length == 1) {
-            KeyValue errorKV = result.raw()[0];
-            int errorCode = PDataType.INTEGER.getCodec().decodeInt(errorKV.getBuffer(), errorKV.getValueOffset(), null);
+        if (result.rawCells().length == 1) {
+            Cell errorKV = result.rawCells()[0];
+            int errorCode = PDataType.INTEGER.getCodec().decodeInt(errorKV.getValueArray(), errorKV.getValueOffset(), null);
             SQLExceptionCode code = SQLExceptionCode.fromErrorCode(errorCode);
             // TODO: We could have the server return the timestamps of the
             // delete markers and we could insert them here, but this seems
@@ -227,28 +228,28 @@ public class Sequence {
     }
     
     public static KeyValue getCurrentValueKV(Result r) {
-        KeyValue[] kvs = r.raw();
+        Cell[] kvs = r.rawCells();
         assert(kvs.length == SEQUENCE_KEY_VALUES);
-        return kvs[CURRENT_VALUE_INDEX];
+        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(kvs[CURRENT_VALUE_INDEX]);
     }
     
     public static KeyValue getIncrementByKV(Result r) {
-        KeyValue[] kvs = r.raw();
+        Cell[] kvs = r.rawCells();
         assert(kvs.length == SEQUENCE_KEY_VALUES);
-        return kvs[INCREMENT_BY_INDEX];
+        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(kvs[INCREMENT_BY_INDEX]);
     }
     
     public static KeyValue getCacheSizeKV(Result r) {
-        KeyValue[] kvs = r.raw();
+        Cell[] kvs = r.rawCells();
         assert(kvs.length == SEQUENCE_KEY_VALUES);
-        return kvs[CACHE_SIZE_INDEX];
+        return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(kvs[CACHE_SIZE_INDEX]);
     }
     
     public static Result replaceCurrentValueKV(Result r, KeyValue currentValueKV) {
-        KeyValue[] kvs = r.raw();
-        List<KeyValue> newkvs = Lists.newArrayList(kvs);
+        Cell[] kvs = r.rawCells();
+        List<Cell> newkvs = Lists.newArrayList(kvs);
         newkvs.set(CURRENT_VALUE_INDEX, currentValueKV);
-        return new Result(newkvs);
+        return Result.create(newkvs);
     }
     
     private static final class SequenceValue {
@@ -286,20 +287,20 @@ public class Sequence {
             KeyValue incrementByKV = getIncrementByKV(r);
             KeyValue cacheSizeKV = getCacheSizeKV(r);
             timestamp = currentValueKV.getTimestamp();
-            nextValue = PDataType.LONG.getCodec().decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), null);
-            incrementBy = PDataType.LONG.getCodec().decodeLong(incrementByKV.getBuffer(), incrementByKV.getValueOffset(), null);
-            cacheSize = PDataType.INTEGER.getCodec().decodeInt(cacheSizeKV.getBuffer(), cacheSizeKV.getValueOffset(), null);
+            nextValue = PDataType.LONG.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), null);
+            incrementBy = PDataType.LONG.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), null);
+            cacheSize = PDataType.INTEGER.getCodec().decodeInt(cacheSizeKV.getValueArray(), cacheSizeKV.getValueOffset(), null);
             currentValue = nextValue - incrementBy * cacheSize;
         }
     }
 
     public boolean returnValue(Result result) throws SQLException {
-        KeyValue statusKV = result.raw()[0];
+        Cell statusKV = result.rawCells()[0];
         if (statusKV.getValueLength() == 0) { // No error, but unable to return sequence values
             return false;
         }
         long timestamp = statusKV.getTimestamp();
-        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getBuffer(), statusKV.getValueOffset(), null);
+        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), null);
         if (statusCode == SUCCESS) {  // Success - update nextValue down to currentValue
             SequenceValue value = findSequenceValue(timestamp);
             if (value == null) {
@@ -327,22 +328,22 @@ public class Sequence {
         if (timestamp != HConstants.LATEST_TIMESTAMP) {
             append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
         }
-        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
+        Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
         byte[] startWithBuf = PDataType.LONG.toBytes(startWith);
-        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PDataType.LONG.toBytes(incrementBy)),
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PDataType.INTEGER.toBytes(cacheSize))
+        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PDataType.LONG.toBytes(incrementBy)),
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PDataType.INTEGER.toBytes(cacheSize))
                 ));
         return append;
     }
 
     public long createSequence(Result result) throws SQLException {
-        KeyValue statusKV = result.raw()[0];
+        Cell statusKV = result.rawCells()[0];
         long timestamp = statusKV.getTimestamp();
-        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getBuffer(), statusKV.getValueOffset(), null);
+        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), null);
         if (statusCode == 0) {  // Success - add sequence value and return timestamp
             SequenceValue value = new SequenceValue(timestamp);
             insertSequenceValue(value);
@@ -362,16 +363,16 @@ public class Sequence {
         if (timestamp != HConstants.LATEST_TIMESTAMP) {
             append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
         }
-        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
-        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
-                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
+        Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
+        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
+                (Cell)KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
         return append;
     }
 
     public long dropSequence(Result result) throws SQLException {
-        KeyValue statusKV = result.raw()[0];
+        Cell statusKV = result.rawCells()[0];
         long timestamp = statusKV.getTimestamp();
-        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getBuffer(), statusKV.getValueOffset(), null);
+        int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), null);
         SQLExceptionCode code = statusCode == 0 ? null : SQLExceptionCode.fromErrorCode(statusCode);
         if (code == null) {
             // Insert delete marker so that point-in-time sequences work

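The Result-handling changes in isolation: raw() becomes rawCells(), the Result constructor becomes Result.create(), getBuffer() splits into per-component backing arrays, and KeyValueUtil.ensureKeyValue() bridges back where a concrete KeyValue is still required. A sketch with illustrative cell contents:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ResultCellDemo {
        public static void main(String[] args) {
            KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
                    Bytes.toBytes("q"), 1L, Bytes.toBytes(42L)); // illustrative cell
            Result result = Result.create(new Cell[] { kv });   // replaces new Result(kvs)
            Cell statusKV = result.rawCells()[0];               // replaces result.raw()[0]
            // getValueArray() replaces getBuffer() as the backing array for the value
            long value = Bytes.toLong(statusKV.getValueArray(), statusKV.getValueOffset());
            // bridge back to a KeyValue where one is still required
            KeyValue asKeyValue = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(statusKV);
            System.out.println(value + " " + asKeyValue.getTimestamp());
        }
    }
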
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
index d34ec2d..98b8c66 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStats.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.schema.stat;
 
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 
@@ -43,5 +44,5 @@ public interface PTableStats {
      */
     byte[][] getRegionGuidePosts(HRegionInfo region);
 
-    void write(DataOutput output) throws IOException;
+    Map<String, byte[][]> getGuidePosts();
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
index b7d5063..672518a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/PTableStatsImpl.java
@@ -52,19 +52,11 @@ public class PTableStatsImpl implements PTableStats {
     }
 
     @Override
-    public void write(DataOutput output) throws IOException {
-        if (regionGuidePosts == null) {
-            WritableUtils.writeVInt(output, 0);
-            return;
-        }
-        WritableUtils.writeVInt(output, regionGuidePosts.size());
-        for (Entry<String, byte[][]> entry : regionGuidePosts.entrySet()) {
-            WritableUtils.writeString(output, entry.getKey());
-            byte[][] value = entry.getValue();
-            WritableUtils.writeVInt(output, value.length);
-            for (int i=0; i<value.length; i++) {
-                Bytes.writeByteArray(output, value[i]);
-            }
-        }
+    public Map<String, byte[][]> getGuidePosts(){
+      if(regionGuidePosts != null) {
+        return ImmutableMap.copyOf(regionGuidePosts);
+      }
+      
+      return null;
     }
 }

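A sketch of the new accessor in place of hand-rolled Writable serialization: the stats object now just exposes an immutable copy of its guide-posts map (the region name and keys here are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.schema.stat.PTableStats;
    import org.apache.phoenix.schema.stat.PTableStatsImpl;

    public class GuidePostsDemo {
        public static void main(String[] args) {
            Map<String, byte[][]> posts = new HashMap<String, byte[][]>();
            posts.put("region-1", new byte[][] { Bytes.toBytes("k1"), Bytes.toBytes("k2") });
            PTableStats stats = new PTableStatsImpl(posts);
            // an immutable copy of the map, or null when no guide posts were recorded
            Map<String, byte[][]> copy = stats.getGuidePosts();
            System.out.println(copy.size());
        }
    }
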
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
index fefd435..3395f61 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.schema.tuple;
 
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 
@@ -29,23 +30,23 @@ import org.apache.phoenix.util.KeyValueUtil;
 
 
 public class MultiKeyValueTuple implements Tuple {
-    private List<KeyValue> values;
+    private List<Cell> values;
     
-    public MultiKeyValueTuple(List<KeyValue> values) {
+    public MultiKeyValueTuple(List<Cell> values) {
         setKeyValues(values);
     }
     
     public MultiKeyValueTuple() {
     }
 
-    public void setKeyValues(List<KeyValue> values) {
+    public void setKeyValues(List<Cell> values) {
         this.values = ImmutableList.copyOf(values);
     }
     
     @Override
     public void getKey(ImmutableBytesWritable ptr) {
-        KeyValue value = values.get(0);
-        ptr.set(value.getBuffer(), value.getRowOffset(), value.getRowLength());
+        Cell value = values.get(0);
+        ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength());
     }
 
     @Override
@@ -54,7 +55,7 @@ public class MultiKeyValueTuple implements Tuple {
     }
 
     @Override
-    public KeyValue getValue(byte[] family, byte[] qualifier) {
+    public Cell getValue(byte[] family, byte[] qualifier) {
         return KeyValueUtil.getColumnLatest(values, family, qualifier);
     }
 
@@ -69,17 +70,17 @@ public class MultiKeyValueTuple implements Tuple {
     }
 
     @Override
-    public KeyValue getValue(int index) {
+    public Cell getValue(int index) {
         return values.get(index);
     }
 
     @Override
     public boolean getValue(byte[] family, byte[] qualifier,
             ImmutableBytesWritable ptr) {
-        KeyValue kv = getValue(family, qualifier);
+        Cell kv = getValue(family, qualifier);
         if (kv == null)
             return false;
-        ptr.set(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+        ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
         return true;
     }
 }

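The getKey() change in isolation: a Cell's row is addressed through getRowArray()/getRowOffset()/getRowLength() rather than the single getBuffer() array of 0.94. A sketch with an illustrative cell:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowPointerDemo {
        public static void main(String[] args) {
            Cell cell = new KeyValue(Bytes.toBytes("row-1"), Bytes.toBytes("cf"),
                    Bytes.toBytes("q"), 1L, Bytes.toBytes("v")); // illustrative cell
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            // 0.94: ptr.set(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
            ptr.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
        }
    }
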
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
index 419102d..5b492da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java
@@ -19,7 +19,7 @@
  */
 package org.apache.phoenix.schema.tuple;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 
 /**
@@ -58,7 +58,7 @@ public interface Tuple {
      * @return the KeyValue at the given index
      * @throws IndexOutOfBoundsException if an invalid index is used
      */
-    public KeyValue getValue(int index);
+    public Cell getValue(int index);
     
     /***
      * Get the KeyValue contained by the Tuple with the given family and
@@ -68,7 +68,7 @@ public interface Tuple {
      * @return the KeyValue with the given family and qualifier name or
      * null if not found.
      */
-    public KeyValue getValue(byte [] family, byte [] qualifier);
+    public Cell getValue(byte [] family, byte [] qualifier);
     
     /***
      * Get the value byte array of the KeyValue contained by the Tuple with 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/CSVLoader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVLoader.java
index d5b3c9e..9741874 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/CSVLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/CSVLoader.java
@@ -194,7 +194,6 @@ public class CSVLoader {
 	 * PDataType
 	 * 
 	 * @param columns
-	 * @return
 	 * @throws SQLException
 	 */
 	private ColumnInfo[] generateColumnInfo(List<String> columns)

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 5bd74d2..a30e704 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -24,16 +24,14 @@ import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.index.ValueGetter;
 import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
 import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.client.KeyValueBuilder;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -46,6 +44,8 @@ import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
 
+import com.google.common.collect.Lists;
+
 public class IndexUtil {
     public static final String INDEX_COLUMN_NAME_SEP = ":";
     public static final byte[] INDEX_COLUMN_NAME_SEP_BYTES = Bytes.toBytes(INDEX_COLUMN_NAME_SEP);
@@ -166,17 +166,17 @@ public class IndexUtil {
                             if (isEmptyKeyValue(table, ref)) {
                                 return null;
                             }
-                            Map<byte [], List<KeyValue>> familyMap = dataMutation.getFamilyMap();
+                            Map<byte [], List<Cell>> familyMap = dataMutation.getFamilyCellMap();
                             byte[] family = ref.getFamily();
-                            List<KeyValue> kvs = familyMap.get(family);
+                            List<Cell> kvs = familyMap.get(family);
                             if (kvs == null) {
                                 return null;
                             }
                             byte[] qualifier = ref.getQualifier();
-                            for (KeyValue kv : kvs) {
-                                if (Bytes.compareTo(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 &&
-                                    Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
-                                    return new ImmutableBytesPtr(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+                            for (Cell kv : kvs) {
+                                if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 &&
+                                    Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
+                                    return new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                                 }
                             }
                             return null;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
index 4e46397..bb56d02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Result;
@@ -74,6 +75,16 @@ public class KeyValueUtil {
                 ts, Type.Put,
                 value, valueOffset, valueLength);
     }
+    
+    public static KeyValue newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, 
+        int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, 
+        int valueOffset, int valueLength) {
+        return new KeyValue(key, keyOffset, keyLength,
+                cf, cfOffset, cfLength,
+                cq, cqOffset, cqLength,
+                ts, Type.Put,
+                value, valueOffset, valueLength);
+    }
 
     public static KeyValue newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) {
         return newKeyValue(key,cf,cq,ts,value,0,value.length);
@@ -89,12 +100,12 @@ public class KeyValueUtil {
      * @param family
      * @param qualifier
      */
-    public static KeyValue getColumnLatest(List<KeyValue>kvs, byte[] family, byte[] qualifier) {
+    public static Cell getColumnLatest(List<Cell>kvs, byte[] family, byte[] qualifier) {
         if (kvs.size() == 0) {
         	return null;
         }
-        KeyValue row = kvs.get(0);
-        Comparator<KeyValue> comp = new SearchComparator(row.getBuffer(), row.getRowOffset(), row.getRowLength(), family, qualifier);
+        Cell row = kvs.get(0);
+        Comparator<Cell> comp = new SearchComparator(row.getRowArray(), row.getRowOffset(), row.getRowLength(), family, qualifier);
         // pos === ( -(insertion point) - 1)
         int pos = Collections.binarySearch(kvs, null, comp);
         // never will exact match
@@ -106,12 +117,12 @@ public class KeyValueUtil {
           return null; // doesn't exist
         }
     
-        KeyValue kv = kvs.get(pos);
-        if (Bytes.compareTo(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
+        Cell kv = kvs.get(pos);
+        if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
                 family, 0, family.length) != 0) {
             return null;
         }
-        if (Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(),
+        if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
                 qualifier, 0, qualifier.length) != 0) {
             return null;
         }
@@ -124,7 +135,7 @@ public class KeyValueUtil {
      * Making use of that saves instanceof checks, and allows us
      * to inline the search term in the comparator
      */
-	private static class SearchComparator implements Comparator<KeyValue> {
+	private static class SearchComparator implements Comparator<Cell> {
 		private final byte[] row;
 		private final byte[] family;
 		private final byte[] qualifier;
@@ -140,9 +151,9 @@ public class KeyValueUtil {
 		}
 
 		@Override
-        public int compare(final KeyValue l, final KeyValue ignored) {
+    public int compare(final Cell l, final Cell ignored) {
 			assert ignored == null;
-			final byte[] buf = l.getBuffer();
+			final byte[] buf = l.getRowArray();
 			final int rOff = l.getRowOffset();
 			final short rLen = l.getRowLength();
 			// row
@@ -151,15 +162,15 @@ public class KeyValueUtil {
 				return val;
 			}
 			// family
-			final int fOff = l.getFamilyOffset(rLen);
-			final byte fLen = l.getFamilyLength(fOff);
-			val = Bytes.compareTo(buf, fOff, fLen, family, 0, family.length);
+			final int fOff = l.getFamilyOffset();
+			final byte fLen = l.getFamilyLength();
+			val = Bytes.compareTo(l.getFamilyArray(), fOff, fLen, family, 0, family.length);
 			if (val != 0) {
 				return val;
 			}
 			// qualifier
-			val = Bytes.compareTo(buf, l.getQualifierOffset(fOff),
-					l.getQualifierLength(rLen, fLen), qualifier, 0, qualifier.length);
+			val = Bytes.compareTo(l.getQualifierArray(), l.getQualifierOffset(),
+					l.getQualifierLength(), qualifier, 0, qualifier.length);
 			if (val != 0) {
 				return val;
 			}

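A sketch of the Cell-typed getColumnLatest(); the input list must already be in KeyValue order (single row, family then qualifier ascending) for its binary search to be valid. Contents are illustrative:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.util.KeyValueUtil;

    public class ColumnLatestDemo {
        public static void main(String[] args) {
            byte[] row = Bytes.toBytes("row");
            byte[] cf = Bytes.toBytes("cf");
            // already sorted: same row, qualifiers "a" < "b"
            List<Cell> cells = Arrays.<Cell>asList(
                    KeyValueUtil.newKeyValue(row, cf, Bytes.toBytes("a"), 1L, Bytes.toBytes("v1")),
                    KeyValueUtil.newKeyValue(row, cf, Bytes.toBytes("b"), 1L, Bytes.toBytes("v2")));
            Cell match = KeyValueUtil.getColumnLatest(cells, cf, Bytes.toBytes("b"));
            System.out.println(match == null ? "not found" : Bytes.toString(
                    match.getValueArray(), match.getValueOffset(), match.getValueLength()));
        }
    }
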
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 3e78726..5c92aed 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -24,6 +24,7 @@ import static org.apache.phoenix.util.SchemaUtil.getVarChars;
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -151,11 +152,11 @@ public class MetaDataUtil {
     }
     
     public static long getSequenceNumber(Mutation tableMutation) {
-        List<KeyValue> kvs = tableMutation.getFamilyMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
+        List<Cell> kvs = tableMutation.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
         if (kvs != null) {
-            for (KeyValue kv : kvs) { // list is not ordered, so search. TODO: we could potentially assume the position
-                if (Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(), PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, 0, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES.length) == 0) {
-                    return PDataType.LONG.getCodec().decodeLong(kv.getBuffer(), kv.getValueOffset(), null);
+            for (Cell kv : kvs) { // list is not ordered, so search. TODO: we could potentially assume the position
+                if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, 0, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES.length) == 0) {
+                    return PDataType.LONG.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), null);
                 }
             }
         }
@@ -167,8 +168,8 @@ public class MetaDataUtil {
     }
     
     public static PTableType getTableType(List<Mutation> tableMetaData) {
-        KeyValue kv = getMutationKeyValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.TABLE_TYPE_BYTES);
-        return kv == null ? null : PTableType.fromSerializedValue(kv.getBuffer()[kv.getValueOffset()]);
+        Cell kv = getMutationKeyValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.TABLE_TYPE_BYTES);
+        return kv == null ? null : PTableType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
     }
     
     public static long getParentSequenceNumber(List<Mutation> tableMetaData) {
@@ -186,11 +187,11 @@ public class MetaDataUtil {
     }
 
     private static KeyValue getMutationKeyValue(Mutation headerRow, byte[] key) {
-        List<KeyValue> kvs = headerRow.getFamilyMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
+        List<Cell> kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES);
         if (kvs != null) {
-            for (KeyValue kv : kvs) {
-                if (Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(), key, 0,
-                        key.length) == 0) { return kv; }
+            for (Cell kv : kvs) {
+                if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), key, 0,
+                        key.length) == 0) { return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(kv); }
             }
         }
         return null;
@@ -217,7 +218,7 @@ public class MetaDataUtil {
     }    
 
     public static long getClientTimeStamp(Mutation m) {
-        Collection<List<KeyValue>> kvs = m.getFamilyMap().values();
+        Collection<List<Cell>> kvs = m.getFamilyCellMap().values();
         // Empty if Mutation is a Delete
         // TODO: confirm that Delete timestamp is reset like Put
         return kvs.isEmpty() ? m.getTimeStamp() : kvs.iterator().next().get(0).getTimestamp();

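The getFamilyMap() to getFamilyCellMap() switch in getSequenceNumber above recurs throughout this patch: the 0.96+ Mutation API hands back a List<Cell> per family. A self-contained sketch of the lookup pattern, with readLongValue and its arguments as hypothetical names:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutationScanExample {
        static Long readLongValue(Put put, byte[] family, byte[] qualifier) {
            List<Cell> cells = put.getFamilyCellMap().get(family);
            if (cells == null) {
                return null;
            }
            for (Cell cell : cells) { // the list is unordered, so search linearly
                if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
                        cell.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
                }
            }
            return null;
        }
    }
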
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 8b35935..d7d8e5d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -33,6 +33,7 @@ import java.util.List;
 import java.util.Properties;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -298,9 +299,9 @@ public class PhoenixRuntime {
                 Pair<byte[],List<Mutation>> pair = iterator.next();
                 List<KeyValue> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row
                 for (Mutation mutation : pair.getSecond()) {
-                    for (List<KeyValue> keyValueList : mutation.getFamilyMap().values()) {
-                        for (KeyValue keyValue : keyValueList) {
-                            keyValues.add(keyValue);
+                    for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
+                        for (Cell keyValue : keyValueList) {
+                            keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
                         }
                     }
                 }

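KeyValueUtil.ensureKeyValue(Cell), used above, returns its argument unchanged when the Cell is already a KeyValue and copies it into one otherwise, which is why the patch reaches for it wherever a KeyValue-typed collection must be filled from Cell-typed API results. The conversion in isolation, as a sketch:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    public class EnsureKeyValueExample {
        static List<KeyValue> toKeyValues(List<Cell> cells) {
            List<KeyValue> kvs = new ArrayList<KeyValue>(cells.size());
            for (Cell cell : cells) {
                // no-op cast when the Cell is already a KeyValue, deep copy otherwise
                kvs.add(KeyValueUtil.ensureKeyValue(cell));
            }
            return kvs;
        }
    }
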
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
index 7c697d6..3e25e16 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
@@ -19,9 +19,12 @@
  */
 package org.apache.phoenix.util;
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -43,6 +46,20 @@ public class ResultUtil {
     private ResultUtil() {
     }
     
+    public static Result toResult(ImmutableBytesWritable bytes) {
+        byte [] buf = bytes.get();
+        int offset = bytes.getOffset();
+        int finalOffset = bytes.getSize() + offset;
+        List<Cell> kvs = new ArrayList<Cell>();
+        while(offset < finalOffset) {
+          int keyLength = Bytes.toInt(buf, offset);
+          offset += Bytes.SIZEOF_INT;
+          kvs.add(new KeyValue(buf, offset, keyLength));
+          offset += keyLength;
+        }
+        return Result.create(kvs);
+    }
+    
     /**
      * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result.
      * @param r
@@ -92,11 +109,10 @@ public class ResultUtil {
     /**
      * Get the offset into the Result byte array to the key.
      * @param r
-     * @return
      */
     static int getKeyOffset(Result r) {
-        // Special case for when Result was instantiated via KeyValue array (no bytes in that case) versus returned from a scanner
-        return (r.getBytes() == null ? r.raw()[0].getOffset() : (r.getBytes().getOffset() + Bytes.SIZEOF_INT /* KV length in Result */)) + KeyValue.ROW_OFFSET /* key offset in KV */ + Bytes.SIZEOF_SHORT /* key length */;
+        KeyValue firstKV = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(r.rawCells()[0]);
+        return firstKV.getOffset();
     }
     
     static int getKeyLength(Result r) {
@@ -105,11 +121,8 @@ public class ResultUtil {
     }
     
     static byte[] getRawBytes(Result r) {
-        // Handle special case for when Result was instantiated via KeyValue array (no bytes in that case) versus returned from a scanner
-        ImmutableBytesWritable rPtr = r.getBytes();
-        if (rPtr != null)
-            return rPtr.get();
-        return r.raw()[0].getBuffer();
+        KeyValue firstKV = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(r.rawCells()[0]);
+        return firstKV.getBuffer();
     }
 
     public static int compareKeys(Result r1, Result r2) {

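The new toResult(ImmutableBytesWritable) above walks a length-prefixed byte run (a 4-byte KV length followed by the raw KV bytes, repeated) and rebuilds a Result through the 0.96+ factory Result.create(List<Cell>), since the old public Result constructors are gone. A hedged sketch of producing bytes in that same framing (not code from this patch):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

    public class ResultBytesExample {
        static ImmutableBytesWritable serialize(KeyValue... kvs) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            for (KeyValue kv : kvs) {
                out.writeInt(kv.getLength());                               // 4-byte length prefix
                out.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); // raw KV bytes
            }
            out.flush();
            byte[] buf = bytes.toByteArray();
            return new ImmutableBytesWritable(buf, 0, buf.length);
        }
    }
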
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index c4ca7d6..d0ab434 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -54,8 +54,6 @@ import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.ValueSchema.Field;
 
-
-
 /**
  * 
  * Static class for various schema-related utilities

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 99850b9..76cf186 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -44,6 +44,10 @@ public class ServerUtil {
     }
 
     public static void throwIOException(String msg, Throwable t) throws IOException {
+        throw createIOException(msg, t);
+    }
+
+    public static IOException createIOException(String msg, Throwable t) {
        // First unwrap SQLExceptions if the root cause is an IOException.
         if (t instanceof SQLException) {
             Throwable cause = t.getCause();
@@ -53,18 +57,18 @@ public class ServerUtil {
         }
        // Return immediately if DoNotRetryIOException
         if (t instanceof DoNotRetryIOException) {
-            throw (DoNotRetryIOException)t;
+            return (DoNotRetryIOException) t;
         } else if (t instanceof IOException) {
             // If the IOException does not wrap any exception, then bubble it up.
             Throwable cause = t.getCause();
             if (cause == null || cause instanceof IOException) {
-                throw (IOException)t;
+                return (IOException) t;
             }
            // Else assume it's been wrapped, so return a DoNotRetryIOException to prevent the client from hanging while retrying
-            throw new DoNotRetryIOException(t.getMessage(), cause);
+            return new DoNotRetryIOException(t.getMessage(), cause);
         } else if (t instanceof SQLException) {
             // If it's already an SQLException, construct an error message so we can parse and reconstruct on the client side.
-            throw new DoNotRetryIOException(constructSQLErrorMessage((SQLException) t, msg), t);
+            return new DoNotRetryIOException(constructSQLErrorMessage((SQLException) t, msg), t);
         } else {
             // Not a DoNotRetryIOException, IOException or SQLException. Map the exception type to a general SQLException 
            // and construct the error message so it can be reconstructed on the client side.
@@ -72,9 +76,9 @@ public class ServerUtil {
             // If no mapping exists, rethrow it as a generic exception.
             SQLExceptionCode code = errorcodeMap.get(t.getClass());
             if (code == null) {
-                throw new DoNotRetryIOException(msg + ": " + t.getMessage(), t);
+                return new DoNotRetryIOException(msg + ": " + t.getMessage(), t);
             } else {
-                throw new DoNotRetryIOException(constructSQLErrorMessage(code, t, msg), t);
+                return new DoNotRetryIOException(constructSQLErrorMessage(code, t, msg), t);
             }
         }
     }

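Splitting throwIOException into a throwing wrapper over createIOException keeps the exception-mapping logic in one place while letting callers that cannot throw at the call site (for example, code handing a failure to a callback) build the mapped exception instead. A minimal sketch of the two call shapes; doServerWork and reportFailure are hypothetical:

    import java.io.IOException;
    import org.apache.phoenix.util.ServerUtil;

    public class ServerUtilUsage {
        void inline() throws IOException {
            try {
                doServerWork();
            } catch (Exception e) {
                ServerUtil.throwIOException("context message", e); // maps and throws
            }
        }

        void deferred() {
            try {
                doServerWork();
            } catch (Exception e) {
                // build the mapped exception without throwing, e.g. for a callback
                IOException mapped = ServerUtil.createIOException("context message", e);
                reportFailure(mapped);
            }
        }

        void doServerWork() throws Exception { /* hypothetical */ }
        void reportFailure(IOException e) { /* hypothetical */ }
    }
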
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
index dc42d8c..d4ae7b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
@@ -26,6 +26,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -79,10 +80,10 @@ public class TupleUtil {
      */
     public static void getAggregateValue(Tuple r, ImmutableBytesWritable ptr) {
         if (r.size() == 1) {
-            KeyValue kv = r.getValue(0); // Just one KV for aggregation
-            if (Bytes.compareTo(SINGLE_COLUMN_FAMILY, 0, SINGLE_COLUMN_FAMILY.length, kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength()) == 0) {
-                if (Bytes.compareTo(SINGLE_COLUMN, 0, SINGLE_COLUMN.length, kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0) {
-                    ptr.set(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
+            Cell kv = r.getValue(0); // Just one KV for aggregation
+            if (Bytes.compareTo(SINGLE_COLUMN_FAMILY, 0, SINGLE_COLUMN_FAMILY.length, kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) == 0) {
+                if (Bytes.compareTo(SINGLE_COLUMN, 0, SINGLE_COLUMN.length, kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0) {
+                    ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                     return;
                 }
             }
@@ -139,14 +140,14 @@ public class TupleUtil {
     public static int write(Tuple result, DataOutput out) throws IOException {
         int size = 0;
         for(int i = 0; i < result.size(); i++) {
-            KeyValue kv = result.getValue(i);
+            KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.getValue(i));
             size += kv.getLength();
             size += Bytes.SIZEOF_INT; // kv.getLength
           }
 
         WritableUtils.writeVInt(out, size);
         for(int i = 0; i < result.size(); i++) {
-            KeyValue kv = result.getValue(i);
+            KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.getValue(i));
             out.writeInt(kv.getLength());
             out.write(kv.getBuffer(), kv.getOffset(), kv.getLength());
           }

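The write path above frames each KeyValue as a 4-byte length followed by its raw bytes, all under one leading vint carrying the total payload size. A sketch of a matching reader under those framing assumptions (illustrative only, not part of the patch):

    import java.io.DataInput;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.io.WritableUtils;

    public class TupleReadExample {
        static List<KeyValue> read(DataInput in) throws IOException {
            int total = WritableUtils.readVInt(in); // total size written by write()
            List<KeyValue> kvs = new ArrayList<KeyValue>();
            int consumed = 0;
            while (consumed < total) {
                int len = in.readInt();             // per-KV length prefix
                byte[] buf = new byte[len];
                in.readFully(buf);
                kvs.add(new KeyValue(buf, 0, len)); // wrap the serialized KV layout
                consumed += len + 4;                // payload plus the int prefix
            }
            return kvs;
        }
    }
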
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
index 6acb8bc..78ee897 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/IndexTestingUtils.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
+import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
@@ -57,7 +57,7 @@ public class IndexTestingUtils {
       conf.setInt(MASTER_INFO_PORT_KEY, -1);
       conf.setInt(RS_INFO_PORT_KEY, -1);
     // setup our codec, so we get proper replay/write
-      conf.set(WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName());
+      conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName());
   }
   /**
    * Verify the state of the index table between the given key and time ranges against the list of

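WALEditCodec was a 0.94-era extension point; HBase 0.96+ replaced it with WALCellCodec, selected through the configuration key WALCellCodec.WAL_CELL_CODEC_CLASS_KEY. The registration pattern used in setupConfig above, shown standalone:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
    import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

    public class WalCodecConfigExample {
        static Configuration withIndexCodec() {
            Configuration conf = HBaseConfiguration.create();
            // register Phoenix's indexed codec so index WAL entries replay correctly
            conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName());
            return conf;
        }
    }
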
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
index 559c161..9bff7fc 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/TestFailForUnsupportedHBaseVersions.java
@@ -116,6 +116,8 @@ public class TestFailForUnsupportedHBaseVersions {
 
     // start the minicluster
     HBaseTestingUtility util = new HBaseTestingUtility(conf);
+    // disable replication
+    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
     util.startMiniCluster();
 
     // setup the primary table

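The same two added lines recur in the other test fixtures in this patch: HConstants.REPLICATION_ENABLE_KEY switches the replication subsystem off so the 0.98 minicluster boots without it. A sketch of the setup pattern:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HConstants;

    public class MiniClusterSetupExample {
        static HBaseTestingUtility startWithoutReplication() throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            // flip the flag on the utility's configuration before the cluster boots
            util.getConfiguration().setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
            util.startMiniCluster();
            return util;
        }
    }
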
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
index a671550..99ea0c7 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestEndToEndCoveredColumnsIndexBuilder.java
@@ -33,8 +33,10 @@ import java.util.Map;
 import java.util.Queue;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -100,6 +102,8 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
     // disable version checking, so we can test against whatever version of HBase happens to be
    // installed (right now, it's generally going to be SNAPSHOT versions).
     conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
+    // disable replication
+    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
     UTIL.startMiniCluster();
   }
 
@@ -130,11 +134,11 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
    */
   private class ListMatchingVerifier implements TableStateVerifier {
 
-    private List<KeyValue> expectedKvs;
+    private List<Cell> expectedKvs;
     private ColumnReference[] columns;
     private String msg;
 
-    public ListMatchingVerifier(String msg, List<KeyValue> kvs, ColumnReference... columns) {
+    public ListMatchingVerifier(String msg, List<Cell> kvs, ColumnReference... columns) {
       this.expectedKvs = kvs;
       this.columns = columns;
       this.msg = msg;
@@ -149,7 +153,7 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
         int count = 0;
         KeyValue kv;
         while ((kv = kvs.next()) != null) {
-          KeyValue next = expectedKvs.get(count++);
+          Cell next = expectedKvs.get(count++);
           assertEquals(
             msg + ": Unexpected kv in table state!\nexpected v1: "
                 + Bytes.toString(next.getValue()) + "\nactual v1:" + Bytes.toString(kv.getValue()),
@@ -199,8 +203,8 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
     p.add(family, qual, Bytes.toBytes("v1"));
     
     // get all the underlying kvs for the put
-    final List<KeyValue> expectedKvs = new ArrayList<KeyValue>();
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>();
+    final List<Cell> expectedKvs = new ArrayList<Cell>();
+    final List<Cell> allKvs = new ArrayList<Cell>();
     allKvs.addAll(p.getFamilyMap().get(family));
 
     // setup the verifier for the data we expect to write
@@ -255,9 +259,9 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
     // since we need to iterate the batch.
 
     // get all the underlying kvs for the put
-    final List<KeyValue> allKvs = new ArrayList<KeyValue>(2);
-    allKvs.addAll(p2.getFamilyMap().get(family));
-    allKvs.addAll(p1.getFamilyMap().get(family));
+    final List<Cell> allKvs = new ArrayList<Cell>(2);
+    allKvs.addAll(p2.getFamilyCellMap().get(family));
+    allKvs.addAll(p1.getFamilyCellMap().get(family));
 
     // setup the verifier for the data we expect to write
     // both puts should be put into a single batch
@@ -266,11 +270,11 @@ public class TestEndToEndCoveredColumnsIndexBuilder {
     VerifyingIndexCodec codec = state.codec;
     // no previous state in the table
     codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections
-        .<KeyValue> emptyList(), familyRef));
-    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyMap().get(family),
+        .<Cell> emptyList(), familyRef));
+    codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyCellMap().get(family),
         familyRef));
 
-    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyMap().get(family),
+    codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyCellMap().get(family),
         familyRef));
     // kvs from both puts should be in the table now
     codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
index 66b4561..0bbdc6e 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/TestLocalTableState.java
@@ -26,6 +26,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
@@ -75,7 +76,7 @@ public class TestLocalTableState {
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
         List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
         KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-        kv.setMemstoreTS(0);
+        kv.setMvccVersion(0);
         list.add(kv);
         return false;
       }
@@ -85,7 +86,7 @@ public class TestLocalTableState {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     //add the kvs from the mutation
-    table.addPendingUpdates(m.get(fam, qual));
+    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
 
     // setup the lookup
     ColumnReference col = new ColumnReference(fam, qual);
@@ -114,7 +115,7 @@ public class TestLocalTableState {
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
     final byte[] stored = Bytes.toBytes("stored-value");
     final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
-    storedKv.setMemstoreTS(2);
+    storedKv.setMvccVersion(2);
     Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
       @Override
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
@@ -127,8 +128,8 @@ public class TestLocalTableState {
     LocalHBaseState state = new LocalTable(env);
     LocalTableState table = new LocalTableState(env, state, m);
     // add the kvs from the mutation
-    KeyValue kv = m.get(fam, qual).get(0);
-    kv.setMemstoreTS(0);
+    KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
+    kv.setMvccVersion(0);
     table.addPendingUpdates(kv);
 
     // setup the lookup
@@ -160,7 +161,7 @@ public class TestLocalTableState {
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
     final KeyValue storedKv =
         new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
-    storedKv.setMemstoreTS(2);
+    storedKv.setMvccVersion(2);
     Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {
       @Override
       public Boolean answer(InvocationOnMock invocation) throws Throwable {

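Two renames drive the churn above: setMemstoreTS became setMvccVersion on KeyValue (same per-cell MVCC sequence number, new name), and Mutation#get(family, qualifier) now returns List<Cell>, so results pass through KeyValueUtil.ensureKeyValue before KeyValue-only setters are called. A compact sketch:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.client.Put;

    public class MvccExample {
        static KeyValue firstKvWithMvcc(Put put, byte[] fam, byte[] qual, long mvcc) {
            // Put#get returns List<Cell> in 0.96+, so convert before the KeyValue-only setter
            KeyValue kv = KeyValueUtil.ensureKeyValue(put.get(fam, qual).get(0));
            kv.setMvccVersion(mvcc); // formerly kv.setMemstoreTS(mvcc)
            return kv;
        }
    }
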
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
index 6e1ec67..4d3fefa 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/data/TestIndexMemStore.java
@@ -41,9 +41,9 @@ public class TestIndexMemStore {
     IndexMemStore store = new IndexMemStore(IndexMemStore.COMPARATOR);
     long ts = 10;
     KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
-    kv.setMemstoreTS(2);
+    kv.setMvccVersion(2);
     KeyValue kv2 = new KeyValue(row, family, qual, ts, Type.Put, val2);
-    kv2.setMemstoreTS(0);
+    kv2.setMvccVersion(0);
     store.add(kv, true);
    // adding the exact same kv shouldn't change anything stored if not overwriting
     store.add(kv2, false);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
index 991d6eb..ddf749a 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
@@ -32,7 +32,9 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -154,7 +156,7 @@ public class TestCoveredColumnIndexCodec {
     ColumnGroup group = new ColumnGroup("test-column-group");
     group.add(COLUMN_REF);
 
-    final Result emptyState = new Result(Collections.<KeyValue> emptyList());
+    final Result emptyState = Result.create(Collections.<Cell> emptyList());
     
     // setup the state we expect for the codec
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -169,7 +171,7 @@ public class TestCoveredColumnIndexCodec {
     // start with a basic put that has some keyvalues
     Put p = new Put(PK);
     // setup the kvs to add
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
+    List<Cell> kvs = new ArrayList<Cell>();
     byte[] v1 = Bytes.toBytes("v1");
     KeyValue kv = new KeyValue(PK, FAMILY, QUAL, 1, v1);
     kvs.add(kv);
@@ -187,7 +189,7 @@ public class TestCoveredColumnIndexCodec {
 
     // get the updates with the pending update
     state.setCurrentTimestamp(1);
-    state.addPendingUpdates(kvs);
+    state.addPendingUpdates(KeyValueUtil.ensureKeyValues(kvs));
     updates = codec.getIndexUpserts(state);
     assertTrue("Didn't find index updates for pending primary table update!", updates.iterator()
         .hasNext());
@@ -207,7 +209,7 @@ public class TestCoveredColumnIndexCodec {
     d.deleteFamily(FAMILY, 2);
     // setup the next batch of 'current state', basically just ripping out the current state from
     // the last round
-    table = new SimpleTableState(new Result(kvs));
+    table = new SimpleTableState(Result.create(kvs));
     state = new LocalTableState(env, table, d);
     state.setCurrentTimestamp(2);
     // check the cleanup of the current table, after the puts (mocking a 'next' update)
@@ -234,13 +236,13 @@ public class TestCoveredColumnIndexCodec {
     ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
   }
 
-  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<KeyValue> currentState,
+  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<Cell> currentState,
       Delete d) throws IOException {
-    LocalHBaseState table = new SimpleTableState(new Result(currentState));
+    LocalHBaseState table = new SimpleTableState(Result.create(currentState));
     LocalTableState state = new LocalTableState(env, table, d);
     state.setCurrentTimestamp(d.getTimeStamp());
     // now we shouldn't see anything when getting the index update
-    state.addPendingUpdates(d.getFamilyMap().get(FAMILY));
+    state.addPendingUpdates(KeyValueUtil.ensureKeyValues(d.getFamilyCellMap().get(FAMILY)));
     Iterable<IndexUpdate> updates = codec.getIndexUpserts(state);
     for (IndexUpdate update : updates) {
       assertFalse("Had some index updates, though it should have been covered by the delete",

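KeyValueUtil.ensureKeyValues(List<Cell>) is the batch form of ensureKeyValue, used above so the List<Cell> coming out of getFamilyCellMap can feed addPendingUpdates, which still expects KeyValues. A sketch against a Delete, with FAMILY as a hypothetical constant:

    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnsureKeyValuesExample {
        static final byte[] FAMILY = Bytes.toBytes("fam"); // hypothetical family

        static List<KeyValue> pendingDeletes(Delete d) {
            // batch conversion from the 0.98 Cell lists to the KeyValue list older code expects
            return KeyValueUtil.ensureKeyValues(d.getFamilyCellMap().get(FAMILY));
        }
    }
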
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/53f7d3ce/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
index d186e38..69aff9d 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
@@ -106,6 +106,8 @@ public class TestEndToEndCoveredIndexing {
     // disable version checking, so we can test against whatever version of HBase happens to be
    // installed (right now, it's generally going to be SNAPSHOT versions).
     conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
+    // disable replication
+    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
     UTIL.startMiniCluster();
   }