Posted to commits@phoenix.apache.org by mu...@apache.org on 2014/02/15 01:07:46 UTC

[13/15] Rename package from org.apache.hadoop.hbase.index.* to org.apache.phoenix.index.* to fix classloader issue causing mutable index performance regression - https://issues.apache.org/jira/browse/PHOENIX-38

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexCodec.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexCodec.java
deleted file mode 100644
index 8f0ee99..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexCodec.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.index.covered.IndexUpdate;
-import org.apache.hadoop.hbase.index.covered.TableState;
-import org.apache.hadoop.hbase.index.scanner.Scanner;
-import org.apache.phoenix.index.BaseIndexCodec;
-
-/**
- * A simple {@link BaseIndexCodec} for the covered-column example index: for each configured
- * {@link ColumnGroup}, it builds the {@link Put}s and {@link Delete}s needed to keep the index
- * table in line with changes to the primary table row.
- */
-public class CoveredColumnIndexCodec extends BaseIndexCodec {
-
-  private static final byte[] EMPTY_BYTES = new byte[0];
-  public static final byte[] INDEX_ROW_COLUMN_FAMILY = Bytes.toBytes("INDEXED_COLUMNS");
-
-  private List<ColumnGroup> groups;
-
-  /**
-   * @param groups to initialize the codec with
-   * @return an instance that is initialized with the given {@link ColumnGroup}s, for testing
-   *         purposes
-   */
-  public static CoveredColumnIndexCodec getCodecForTesting(List<ColumnGroup> groups) {
-    CoveredColumnIndexCodec codec = new CoveredColumnIndexCodec();
-    codec.groups = Lists.newArrayList(groups);
-    return codec;
-  }
-
-  @Override
-  public void initialize(RegionCoprocessorEnvironment env) {
-    groups = CoveredColumnIndexSpecifierBuilder.getColumns(env.getConfiguration());
-  }
-
-  @Override
-  public Iterable<IndexUpdate> getIndexUpserts(TableState state) {
-    List<IndexUpdate> updates = new ArrayList<IndexUpdate>();
-    for (ColumnGroup group : groups) {
-      IndexUpdate update = getIndexUpdateForGroup(group, state);
-      updates.add(update);
-    }
-    return updates;
-  }
-
-  /**
-   * @param group index group for which to build the update
-   * @param state current state of the primary table row
-   * @return the update that should be made to the index table
-   */
-  private IndexUpdate getIndexUpdateForGroup(ColumnGroup group, TableState state) {
-    List<CoveredColumn> refs = group.getColumns();
-    try {
-      Pair<Scanner, IndexUpdate> stateInfo = state.getIndexedColumnsTableState(refs);
-      Scanner kvs = stateInfo.getFirst();
-      Pair<Integer, List<ColumnEntry>> columns =
-          getNextEntries(refs, kvs, state.getCurrentRowKey());
-      // make sure we close the scanner
-      kvs.close();
-      if (columns.getFirst().intValue() == 0) {
-        return stateInfo.getSecond();
-      }
-      // have all the column entries, so just turn them into a Put for the index row
-      // convert the entries to the needed values
-      byte[] rowKey =
-          composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond());
-      Put p = new Put(rowKey, state.getCurrentTimestamp());
-      // add the columns to the put
-      addColumnsToPut(p, columns.getSecond());
-
-      // update the index info
-      IndexUpdate update = stateInfo.getSecond();
-      update.setTable(Bytes.toBytes(group.getTable()));
-      update.setUpdate(p);
-      return update;
-    } catch (IOException e) {
-      throw new RuntimeException("Unexpected exception when getting state for columns: " + refs);
-    }
-  }
-
-  private static void addColumnsToPut(Put indexInsert, List<ColumnEntry> columns) {
-    // add each of the corresponding families to the put
-    int count = 0;
-    for (ColumnEntry column : columns) {
-      indexInsert.add(INDEX_ROW_COLUMN_FAMILY,
-        ArrayUtils.addAll(Bytes.toBytes(count++), toIndexQualifier(column.ref)), null);
-    }
-  }
-
-  private static byte[] toIndexQualifier(CoveredColumn column) {
-    return ArrayUtils.addAll(Bytes.toBytes(column.familyString + CoveredColumn.SEPARATOR),
-      column.getQualifier());
-  }
-
-  @Override
-  public Iterable<IndexUpdate> getIndexDeletes(TableState state) {
-    List<IndexUpdate> deletes = new ArrayList<IndexUpdate>();
-    for (ColumnGroup group : groups) {
-      deletes.add(getDeleteForGroup(group, state));
-    }
-    return deletes;
-  }
-
-
-  /**
-   * Get all the deletes necessary for a group of columns - logically, the cleanup of the index
-   * table for a given index.
-   * @param group index information
-   * @param state current state of the primary table row
-   * @return the cleanup for the given index, or <tt>null</tt> if no cleanup is necessary
-   */
-  private IndexUpdate getDeleteForGroup(ColumnGroup group, TableState state) {
-    List<CoveredColumn> refs = group.getColumns();
-    try {
-      Pair<Scanner, IndexUpdate> kvs = state.getIndexedColumnsTableState(refs);
-      Pair<Integer, List<ColumnEntry>> columns =
-          getNextEntries(refs, kvs.getFirst(), state.getCurrentRowKey());
-      // make sure we close the scanner reference
-      kvs.getFirst().close();
-      // no change, just return the passed update
-      if (columns.getFirst() == 0) {
-        return kvs.getSecond();
-      }
-      // have all the column entries, so just turn it into a Delete for the row
-      // convert the entries to the needed values
-      byte[] rowKey =
-          composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond());
-      Delete d = new Delete(rowKey);
-      d.setTimestamp(state.getCurrentTimestamp());
-      IndexUpdate update = kvs.getSecond();
-      update.setUpdate(d);
-      update.setTable(Bytes.toBytes(group.getTable()));
-      return update;
-    } catch (IOException e) {
-      throw new RuntimeException("Unexpected exception when getting state for columns: " + refs);
-    }
-  }
-
-  /**
-   * Get the next batch of primary table values for the given columns
-   * @param refs columns to match against
-   * @param kvs scanner over the current state of the row
-   * @param currentRow row key of the row being indexed
-   * @return the total length of all values found and the entries to add for the index
-   */
-  private Pair<Integer, List<ColumnEntry>> getNextEntries(List<CoveredColumn> refs, Scanner kvs,
-      byte[] currentRow) throws IOException {
-    int totalValueLength = 0;
-    List<ColumnEntry> entries = new ArrayList<ColumnEntry>(refs.size());
-
-    // pull out the latest state for each column reference, in order
-    for (CoveredColumn ref : refs) {
-      KeyValue first = ref.getFirstKeyValueForRow(currentRow);
-      if (!kvs.seek(first)) {
-        // no more keys, so add a null value
-        entries.add(new ColumnEntry(null, ref));
-        continue;
-      }
-      // there is a next value - we only care about the current value, so we can just snag that
-      KeyValue next = kvs.next();
-      if (ref.matchesFamily(next.getFamily()) && ref.matchesQualifier(next.getQualifier())) {
-        byte[] v = next.getValue();
-        totalValueLength += v.length;
-        entries.add(new ColumnEntry(v, ref));
-      } else {
-        // this first one didn't match at all, so we have to put in a null entry
-        entries.add(new ColumnEntry(null, ref));
-        continue;
-      }
-      // here's where it gets a little tricky - we need to decide whether we should continue
-      // adding entries (matches all qualifiers) or if we are done (matches a single qualifier)
-      if (!ref.allColumns()) {
-        continue;
-      }
-      // matches all columns, so we need to iterate until we hit the next column with the same
-      // family as the current key
-      byte[] lastQual = next.getQualifier();
-      byte[] nextQual = null;
-      while ((next = kvs.next()) != null) {
-        // different family, done with this column
-        if (!ref.matchesFamily(next.getFamily())) {
-          break;
-        }
-        nextQual = next.getQualifier();
-        // we are still on the same qualifier - skip it, since we already added a column for it
-        if (Arrays.equals(lastQual, nextQual)) {
-          continue;
-        }
-        // this must match the qualifier since it's an all-qualifiers specifier, so we add it
-        byte[] v = next.getValue();
-        totalValueLength += v.length;
-        entries.add(new ColumnEntry(v, ref));
-        // update the last qualifier to check against
-        lastQual = nextQual;
-      }
-    }
-    return new Pair<Integer, List<ColumnEntry>>(totalValueLength, entries);
-  }
-
-  static class ColumnEntry {
-    byte[] value = EMPTY_BYTES;
-    CoveredColumn ref;
-
-    public ColumnEntry(byte[] value, CoveredColumn ref) {
-      this.value = value == null ? EMPTY_BYTES : value;
-      this.ref = ref;
-    }
-  }
-
-  /**
-   * Compose the final index row key.
-   * <p>
-   * This is faster than adding each value independently as we can just build a single array and
-   * copy everything over once.
-   * @param pk primary key of the original row
-   * @param length total number of bytes of all the values that should be added
-   * @param values to use when building the key
-   * @return the composed index row key
-   */
-  static byte[] composeRowKey(byte[] pk, int length, List<ColumnEntry> values) {
-    // now build up expected row key, each of the values, in order, followed by the PK and then some
-    // info about lengths so we can deserialize each value
-    byte[] output = new byte[length + pk.length];
-    int pos = 0;
-    int[] lengths = new int[values.size()];
-    int i = 0;
-    for (ColumnEntry entry : values) {
-      byte[] v = entry.value;
-      // skip doing the copy attempt, if we don't need to
-      if (v.length != 0) {
-        System.arraycopy(v, 0, output, pos, v.length);
-        pos += v.length;
-      }
-      lengths[i++] = v.length;
-    }
-
-    // add the primary key to the end of the row key
-    System.arraycopy(pk, 0, output, pos, pk.length);
-
-    // add the lengths as suffixes so we can deserialize the elements again
-    for (int l : lengths) {
-      output = ArrayUtils.addAll(output, Bytes.toBytes(l));
-    }
-
-    // and the last integer is the number of values
-    return ArrayUtils.addAll(output, Bytes.toBytes(values.size()));
-  }
-
-  /**
-   * Essentially a short-cut for building a {@link Put} and extracting its keyvalues.
-   * @param pk row key
-   * @param timestamp timestamp of all the keyvalues
-   * @param values expected value-column pairs
-   * @return the keyvalues that the index contains for a given row at a timestamp with the given
-   *         value-column pairs.
-   */
-  public static List<KeyValue> getIndexKeyValueForTesting(byte[] pk, long timestamp,
-      List<Pair<byte[], CoveredColumn>> values) {
-  
-    int length = 0;
-    List<ColumnEntry> expected = new ArrayList<ColumnEntry>(values.size());
-    for (Pair<byte[], CoveredColumn> value : values) {
-      ColumnEntry entry = new ColumnEntry(value.getFirst(), value.getSecond());
-      length += value.getFirst().length;
-      expected.add(entry);
-    }
-  
-    byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected);
-    Put p = new Put(rowKey, timestamp);
-    CoveredColumnIndexCodec.addColumnsToPut(p, expected);
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    for (Entry<byte[], List<KeyValue>> entry : p.getFamilyMap().entrySet()) {
-      kvs.addAll(entry.getValue());
-    }
-  
-    return kvs;
-  }
-
-  public static List<byte[]> getValues(byte[] bytes) {
-    // get the total number of keys in the bytes
-    int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length);
-    List<byte[]> keys = new ArrayList<byte[]>(keyCount);
-    int[] lengths = new int[keyCount];
-    int lengthPos = keyCount - 1;
-    int pos = bytes.length - Bytes.SIZEOF_INT;
-    // figure out the length of each key
-    for (int i = 0; i < keyCount; i++) {
-      lengths[lengthPos--] = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos);
-      pos -= Bytes.SIZEOF_INT;
-    }
-
-    int current = 0;
-    for (int length : lengths) {
-      byte[] key = Arrays.copyOfRange(bytes, current, current + length);
-      keys.add(key);
-      current += length;
-    }
-
-    return keys;
-  }
-
-  /**
-   * Read an integer from the preceding {@value Bytes#SIZEOF_INT} bytes
-   * @param bytes array to read from
-   * @param start start point, backwards from which to read. For example, if specifying "25", we
-   *          would try to read an integer from 21 -> 25
-   * @return an integer from the preceding {@value Bytes#SIZEOF_INT} bytes, if it exists.
-   */
-  private static int getPreviousInteger(byte[] bytes, int start) {
-    return Bytes.toInt(bytes, start - Bytes.SIZEOF_INT);
-  }
-
-  /**
-   * Check to see if a row key just contains a list of null values.
-   * @param bytes row key to examine
-   * @return <tt>true</tt> if all the values are zero-length, <tt>false</tt> otherwise
-   */
-  public static boolean checkRowKeyForAllNulls(byte[] bytes) {
-    int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length);
-    int pos = bytes.length - Bytes.SIZEOF_INT;
-    for (int i = 0; i < keyCount; i++) {
-      int next = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos);
-      if (next > 0) {
-        return false;
-      }
-      pos -= Bytes.SIZEOF_INT;
-    }
-
-    return true;
-  }
-
-  @Override
-  public boolean isEnabled(Mutation m) {
-    // this could be a bit smarter, looking at the groups for the mutation, but we leave it at this
-    // simple check for the moment.
-    return groups.size() > 0;
-  }
-}
\ No newline at end of file
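
For reference, the row-key format that composeRowKey() writes and getValues() reads back above is
simply: each value in order, then the original primary key, then one 4-byte length per value, and
finally a trailing 4-byte value count. A minimal standalone sketch of that round trip (class and
method names here are illustrative only, not part of the moved code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IndexRowKeySketch {
      // encode: [v1][v2]...[vN][pk][len(v1)]...[len(vN)][N]
      static byte[] encode(byte[] pk, List<byte[]> values) {
        byte[] out = new byte[0];
        for (byte[] v : values) {
          out = Bytes.add(out, v);
        }
        out = Bytes.add(out, pk);
        for (byte[] v : values) {
          out = Bytes.add(out, Bytes.toBytes(v.length));
        }
        return Bytes.add(out, Bytes.toBytes(values.size()));
      }

      // decode mirrors getValues(): read the trailing count, walk the lengths
      // backwards, then slice the values off the front of the key
      static List<byte[]> decode(byte[] rowKey) {
        int count = Bytes.toInt(rowKey, rowKey.length - Bytes.SIZEOF_INT);
        int[] lengths = new int[count];
        int pos = rowKey.length - Bytes.SIZEOF_INT;
        for (int i = count - 1; i >= 0; i--) {
          pos -= Bytes.SIZEOF_INT;
          lengths[i] = Bytes.toInt(rowKey, pos);
        }
        List<byte[]> values = new ArrayList<byte[]>(count);
        int current = 0;
        for (int len : lengths) {
          values.add(Arrays.copyOfRange(rowKey, current, current + len));
          current += len;
        }
        return values;
      }
    }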

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
deleted file mode 100644
index 1ed758e..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.index.covered.example;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HTableDescriptor;
-
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.covered.CoveredColumnsIndexBuilder;
-import org.apache.hadoop.hbase.index.covered.IndexCodec;
-
-/**
- * Helper to build the configuration for the {@link CoveredColumnIndexer}.
- * <p>
- * This class is NOT thread-safe; all concurrent access must be managed externally.
- */
-public class CoveredColumnIndexSpecifierBuilder {
-
-  private static final String INDEX_TO_TABLE_CONF_PREFX = "hbase.index.covered.";
-  // number of index 'groups'. Each group represents a set of 'joined' columns. The data stored with
-  // each joined column are either just the columns in the group or all the most recent data in the
-  // row (a fully covered index).
-  private static final String COUNT = ".count";
-  private static final String INDEX_GROUPS_COUNT_KEY = INDEX_TO_TABLE_CONF_PREFX + ".groups" + COUNT;
-  private static final String INDEX_GROUP_PREFIX = INDEX_TO_TABLE_CONF_PREFX + "group.";
-  private static final String INDEX_GROUP_COVERAGE_SUFFIX = ".columns";
-  private static final String TABLE_SUFFIX = ".table";
-
-  // right now, we don't support this; should be easy enough to add later
-  // private static final String INDEX_GROUP_FULLY_COVERED = ".covered";
-
-  List<ColumnGroup> groups = new ArrayList<ColumnGroup>();
-  private Map<String, String> specs = new HashMap<String, String>();
-
-  /**
-   * Add a group of columns to index
-   * @param columns Pairs of cf:cq (full specification of a column) to index
-   * @return the index of the group. This can be used to remove or modify the group via
-   *         {@link #remove(int)} or {@link #get(int)}, any time before building
-   */
-  public int addIndexGroup(ColumnGroup columns) {
-    if (columns == null || columns.size() == 0) {
-      throw new IllegalArgumentException("Must specify some columns to index!");
-    }
-    int size = this.groups.size();
-    this.groups.add(columns);
-    return size;
-  }
-
-  public void remove(int i) {
-    this.groups.remove(i);
-  }
-
-  public ColumnGroup get(int i) {
-    return this.groups.get(i);
-  }
-
-  /**
-   * Clear the stored {@link ColumnGroup}s for reuse.
-   */
-  public void reset() {
-    this.groups.clear();
-  }
-
-  Map<String, String> convertToMap() {
-    int total = this.groups.size();
-    // hbase.index.covered.groups.count = <total>
-    specs.put(INDEX_GROUPS_COUNT_KEY, Integer.toString(total));
-
-    int i = 0;
-    for (ColumnGroup group : groups) {
-      addIndexGroupToSpecs(specs, group, i++);
-    }
-
-    return specs;
-  }
-
-  /**
-   * @param specs configuration map to update with the group's keys
-   * @param columns group of columns to serialize
-   * @param index position of the group in the overall specification
-   */
-  private void addIndexGroupToSpecs(Map<String, String> specs, ColumnGroup columns, int index) {
-    // hbase.index.covered.group.<i>
-    String prefix = INDEX_GROUP_PREFIX + Integer.toString(index);
-
-    // set the table to which the group writes
-    // hbase.index.covered.group.<i>.table
-    specs.put(prefix + TABLE_SUFFIX, columns.getTable());
-    
-    // a different key for each column in the group
-    // hbase.index.covered.group.<i>.columns
-    String columnPrefix = prefix + INDEX_GROUP_COVERAGE_SUFFIX;
-    // hbase.index.covered.group.<i>.columns.count = <j>
-    String columnsSizeKey = columnPrefix + COUNT;
-    specs.put(columnsSizeKey, Integer.toString(columns.size()));
-    
-    // add each column in the group
-    int i=0; 
-    for (CoveredColumn column : columns) {
-      // hbase.index.covered.group.<i>.columns.<j>
-      String nextKey = columnPrefix + "." + Integer.toString(i);
-      String nextValue = column.serialize();
-      specs.put(nextKey, nextValue);
-      i++;
-    }
-  }
-
-  public void build(HTableDescriptor desc) throws IOException {
-    build(desc, CoveredColumnIndexCodec.class);
-  }
-
-  void build(HTableDescriptor desc, Class<? extends IndexCodec> clazz) throws IOException {
-    // add the codec for the index to the map of options
-    Map<String, String> opts = this.convertToMap();
-    opts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
-    Indexer.enableIndexing(desc, CoveredColumnIndexer.class, opts);
-  }
-
-  static List<ColumnGroup> getColumns(Configuration conf) {
-    int count= conf.getInt(INDEX_GROUPS_COUNT_KEY, 0);
-    if (count ==0) {
-      return Collections.emptyList();
-    }
-
-    // parse out all the column groups we should index
-    List<ColumnGroup> columns = new ArrayList<ColumnGroup>(count);
-    for (int i = 0; i < count; i++) {
-      // parse out each group
-      String prefix = INDEX_GROUP_PREFIX + i;
-
-      // hbase.index.covered.group.<i>.table
-      String table = conf.get(prefix + TABLE_SUFFIX);
-      ColumnGroup group = new ColumnGroup(table);
-
-      // parse out each column in the group
-      // hbase.index.covered.group.<i>.columns
-      String columnPrefix = prefix + INDEX_GROUP_COVERAGE_SUFFIX;
-      // hbase.index.covered.group.<i>.columns.count = j
-      String columnsSizeKey = columnPrefix + COUNT;
-      int columnCount = conf.getInt(columnsSizeKey, 0);
-      for(int j=0; j< columnCount; j++){
-        String columnKey = columnPrefix + "." + j;
-        CoveredColumn column = CoveredColumn.parse(conf.get(columnKey));
-        group.add(column);
-      }
-
-      // add the group
-      columns.add(group);
-    }
-    return columns;
-  }
-
-  /**
-   * @param key configuration key to set, for testing purposes
-   * @param value value to store for the key
-   */
-  public void addArbitraryConfigForTesting(String key, String value) {
-    this.specs.put(key, value);
-  }
-}
\ No newline at end of file
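
The builder above just flattens each group into plain Configuration entries that getColumns() later
reads back on the region server. A rough sketch of the resulting keys for a single group covering
two columns and writing to a table named "example_index" (key strings follow the comments in the
file and the exact prefixes come from the constants above; the table, family, and qualifier names
are placeholders, and each column value is whatever CoveredColumn.serialize() produces):

    import java.util.HashMap;
    import java.util.Map;

    public class IndexGroupConfigSketch {
      public static Map<String, String> exampleSpecs() {
        Map<String, String> specs = new HashMap<String, String>();
        // total number of index groups
        specs.put("hbase.index.covered.groups.count", "1");
        // group 0 writes its index entries to this table
        specs.put("hbase.index.covered.group.0.table", "example_index");
        // group 0 covers two columns
        specs.put("hbase.index.covered.group.0.columns.count", "2");
        // one key per column; the value is the serialized CoveredColumn (family:qualifier)
        specs.put("hbase.index.covered.group.0.columns.0", "fam1:qual1");
        specs.put("hbase.index.covered.group.0.columns.1", "fam2:qual2");
        return specs;
      }
    }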

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexer.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexer.java
deleted file mode 100644
index 4244a47..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/example/CoveredColumnIndexer.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.index.covered.example;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.hadoop.hbase.index.covered.Batch;
-import org.apache.hadoop.hbase.index.covered.CoveredColumnsIndexBuilder;
-import org.apache.hadoop.hbase.index.covered.LocalTableState;
-import org.apache.hadoop.hbase.index.covered.update.IndexUpdateManager;
-
-/**
- * Index maintainer that maintains multiple indexes based on '{@link ColumnGroup}s'. Each group is
- * fully covered within itself and stores the fully 'pre-joined' version of the values for that
- * group of columns.
- * <p>
- * <h2>Index Layout</h2> The row key for a given index entry is the current state of all the
- * values of the columns in a column group, followed by the primary key (row key) of the original
- * row, and then the length of each value and then finally the total number of values. This is then
- * enough information to completely rebuild the latest value of row for each column in the group.
- * <p>
- * The family is always {@link CoveredColumnIndexCodec#INDEX_ROW_COLUMN_FAMILY}
- * <p>
- * The qualifier is prepended with the integer index (serialized with {@link Bytes#toBytes(int)}) of
- * the column in the group. This index corresponds to the index of the value for the group in the row
- * key.
- * 
- * <pre>
- *         ROW                            ||   FAMILY     ||    QUALIFIER     ||   VALUE
- * (v1)(v2)...(vN)(pk)(L1)(L2)...(Ln)(#V) || INDEX_FAMILY ||     1Cf1:Cq1     ||  null
- * (v1)(v2)...(vN)(pk)(L1)(L2)...(Ln)(#V) || INDEX_FAMILY ||     2Cf2:Cq2     ||  null
- * ...
- * (v1)(v2)...(vN)(pk)(L1)(L2)...(Ln)(#V) || INDEX_FAMILY ||     NCfN:CqN     ||  null
- * </pre>
- * 
- * <h2>Index Maintenance</h2>
- * <p>
- * When making an insertion into the table, we also attempt to clean up the index. This means that we
- * need to remove the previous entry from the index. Generally, this is completed by inserting a
- * delete at the previous value of the previous row.
- * <p>
- * The main caveat here is when dealing with custom timestamps. If there is no special timestamp
- * specified, we can just insert the proper {@link Delete} at the current timestamp and move on.
- * However, when the client specifies a timestamp, we could see updates out of order. In that case,
- * we can do an insert using the specified timestamp, but a delete is different...
- * <p>
- * Taking the simple case, assume we do a single column in a group. Then if we get an out of order
- * update, we need to check the current state of that column in the current row. If the current row
- * is older, we can issue a delete as normal. If the current row is newer, however, we then have to
- * issue a delete for the index update at the time of the current row. This ensures that the index
- * update made for the 'future' time still covers the existing row.
- * <p>
- * <b>ASSUMPTION:</b> all key-values in a single {@link Delete}/{@link Put} have the same timestamp.
- * This dramatically simplifies the logic needed to manage updating the index for out-of-order
- * {@link Put}s as we don't need to manage multiple levels of timestamps across multiple columns.
- * <p>
- * We can extend this to multiple columns by picking the latest update of any column in the group as the
- * delete point.
- * <p>
- * <b>NOTE:</b> this means that we need to do a lookup (point {@link Get}) of the entire row
- * <i>every time there is a write to the table</i>.
- */
-public class CoveredColumnIndexer extends CoveredColumnsIndexBuilder {
-
-  /**
-   * Create the specified index table with the necessary columns
-   * @param admin {@link HBaseAdmin} to use when creating the table
-   * @param indexTable name of the index table.
-   * @throws IOException
-   */
-  public static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
-    createIndexTable(admin, new HTableDescriptor(indexTable));
-  }
-
-  /**
-   * @param admin to create the table
-   * @param index descriptor to update before creating table
-   */
-  public static void createIndexTable(HBaseAdmin admin, HTableDescriptor index) throws IOException {
-    HColumnDescriptor col =
-        new HColumnDescriptor(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY);
-    // ensure that we can 'see past' delete markers when doing scans
-    col.setKeepDeletedCells(true);
-    index.addFamily(col);
-    admin.createTable(index);
-  }
-
-  @Override
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
-      Collection<KeyValue> filtered) throws IOException {
-
-    // stores all the return values
-    IndexUpdateManager updateMap = new IndexUpdateManager();
-    // batch the updates by row to make life easier and ordered
-    Collection<Batch> batches = batchByRow(filtered);
-
-    for (Batch batch : batches) {
-      Put p = new Put(batch.getKvs().iterator().next().getRow());
-      for (KeyValue kv : batch.getKvs()) {
-        // we only need to cleanup Put entries
-        byte type = kv.getType();
-        Type t = KeyValue.Type.codeToType(type);
-        if (!t.equals(Type.Put)) {
-          continue;
-        }
-
-        // add the kv independently
-        p.add(kv);
-      }
-
-      // do the usual thing as for deletes
-      Collection<Batch> timeBatch = createTimestampBatchesFromMutation(p);
-      LocalTableState state = new LocalTableState(env, localTable, p);
-      for (Batch entry : timeBatch) {
-        //just set the timestamp on the table - it already has all the future state
-        state.setCurrentTimestamp(entry.getTimestamp());
-        this.addDeleteUpdatesToMap(updateMap, state, entry.getTimestamp());
-      }
-    }
-    return updateMap.toMap();
-  }
-
-
-  /**
-   * @param filtered keyvalues that were filtered from the primary table
-   * @return the filtered keyvalues grouped into {@link Batch}es
-   */
-  private Collection<Batch>  batchByRow(Collection<KeyValue> filtered) {
-    Map<Long, Batch> batches = new HashMap<Long, Batch>();
-    createTimestampBatchesFromKeyValues(filtered, batches);
-    return batches.values();
-  }
-}
\ No newline at end of file
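
Putting the two previous files together, enabling the example indexer on a data table looks roughly
like the sketch below. Table names are placeholders, the imports use the old package names from the
deleted files, and the CoveredColumn (family, qualifier) constructor is assumed here since it is not
shown in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.index.covered.example.ColumnGroup;
    import org.apache.hadoop.hbase.index.covered.example.CoveredColumn;
    import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexSpecifierBuilder;
    import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexer;

    public class EnableExampleIndexSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);

        // create the index table with the fixed INDEXED_COLUMNS family
        CoveredColumnIndexer.createIndexTable(admin, "example_index");

        // describe the group of data columns that feed the index
        ColumnGroup group = new ColumnGroup("example_index");
        // assumed constructor: CoveredColumn(String family, byte[] qualifier)
        group.add(new CoveredColumn("fam1", Bytes.toBytes("qual1")));

        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
        builder.addIndexGroup(group);

        // attach the indexer coprocessor (and the codec) to the data table, then create it
        HTableDescriptor dataTable = new HTableDescriptor("example_data");
        builder.build(dataTable);
        admin.createTable(dataTable);

        admin.close();
      }
    }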

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
deleted file mode 100644
index d360bd5..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.filter.FilterBase;
-
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
-/**
- * Only allow the 'latest' timestamp of each family:qualifier pair, ensuring that they aren't
- * covered by a previous delete. This is similar to some of the work the ScanQueryMatcher does to
- * ensure correct visibility of keys based on deletes.
- * <p>
- * No actual delete {@link KeyValue}s are allowed to pass through this filter - they are always
- * skipped.
- * <p>
- * Note there is a little bit of conceptually odd behavior (though it matches the HBase
- * specifications) around point deletes ({@link KeyValue} of type {@link Type#Delete}). These deletes
- * only apply to a single {@link KeyValue} at a single point in time - they essentially completely
- * 'cover' the existing {@link Put} at that timestamp. However, they don't 'cover' any other
- * keyvalues at older timestamps. Therefore, if there is a point-delete at ts = 5, and puts at ts =
- * 4, and ts = 5, we will only allow the put at ts = 4.
- * <p>
- * Expects {@link KeyValue}s to arrive in sorted order, with 'Delete' {@link Type} {@link KeyValue}s
- * ({@link Type#DeleteColumn}, {@link Type#DeleteFamily}, {@link Type#Delete}) before their regular
- * {@link Type#Put} counterparts.
- */
-public class ApplyAndFilterDeletesFilter extends FilterBase {
-
-  private boolean done = false;
-  List<ImmutableBytesPtr> families;
-  private final DeleteTracker coveringDelete = new DeleteTracker();
-  private Hinter currentHint;
-  private DeleteColumnHinter columnHint = new DeleteColumnHinter();
-  private DeleteFamilyHinter familyHint = new DeleteFamilyHinter();
-  
-  /**
-   * Set up the filter to only include the given families. This allows us to seek intelligently past
-   * families we don't care about.
-   * @param families families to include in the scan
-   */
-  public ApplyAndFilterDeletesFilter(Set<ImmutableBytesPtr> families) {
-    this.families = new ArrayList<ImmutableBytesPtr>(families);
-    Collections.sort(this.families);
-  }
-      
-  
-  private ImmutableBytesPtr getNextFamily(ImmutableBytesPtr family) {
-    int index = Collections.binarySearch(families, family);
-    //doesn't match exactly, but we can find the right next match
-    //this is pretty unlikely, but just in case
-    if(index < 0){
-      //the actual location of the next match
-      index = -index -1;
-    }else{
-      //it's an exact match for a family, so we get the next entry
-      index = index +1;
-    }
-    //now we have the location of the next entry
-    if(index >= families.size()){
-      return null;
-    }
-    return  families.get(index);
-  }
-  
-  @Override
-  public void reset(){
-    this.coveringDelete.reset();
-    this.done = false;
-  }
-  
-  
-  @Override
-  public KeyValue getNextKeyHint(KeyValue peeked){
-    return currentHint.getHint(peeked);
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(KeyValue next) {
-    // we marked ourselves done, but the END_ROW_KEY didn't manage to seek to the very last key
-    if (this.done) {
-      return ReturnCode.SKIP;
-    }
-
-    switch (KeyValue.Type.codeToType(next.getType())) {
-    /*
-     * DeleteFamily will always sort first because those KVs (we assume) don't have qualifiers (or
-     * rather are null). Therefore, we have to keep a hold of all the delete families until we get
-     * to a Put entry that is covered by that delete (in which case, we are done with the family).
-     */
-    case DeleteFamily:
-      // track the family to delete. If we are updating the delete, that means we have passed all
-      // kvs in the last column, so we can safely ignore the last deleteFamily, and just use this
-      // one. In fact, it means that all the previous deletes can be ignored because the family must
-      // not match anymore.
-      this.coveringDelete.reset();
-      this.coveringDelete.deleteFamily = next;
-      return ReturnCode.SKIP;
-    case DeleteColumn:
-      // similar to deleteFamily, all the newer deletes/puts would have been seen at this point, so
-      // we can safely replace the previously tracked delete column with this more recent one
-      this.coveringDelete.pointDelete = null;
-      this.coveringDelete.deleteColumn = next;
-      return ReturnCode.SKIP;
-    case Delete:
-      // we are just deleting the single column value at this point.
-      // therefore we just skip this entry and go onto the next one. The only caveat is that
-      // we should still cover the next entry if this delete applies to the next entry, so we
-      // have to keep around a reference to the KV to compare against the next valid entry
-      this.coveringDelete.pointDelete = next;
-      return ReturnCode.SKIP;
-    default:
-      // no covering deletes
-      if (coveringDelete.empty()) {
-        return ReturnCode.INCLUDE;
-      }
-
-      if (coveringDelete.matchesFamily(next)) {
-        this.currentHint = familyHint;
-        return ReturnCode.SEEK_NEXT_USING_HINT;
-      }
-
-      if (coveringDelete.matchesColumn(next)) {
-        // hint to the next column
-        this.currentHint = columnHint;
-        return ReturnCode.SEEK_NEXT_USING_HINT;
-      }
-
-      if (coveringDelete.matchesPoint(next)) {
-        return ReturnCode.SKIP;
-      }
-
-    }
-
-    // none of the deletes matches, we are done
-    return ReturnCode.INCLUDE;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be serialized!");
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be deserialized!");
-  }
-
-  /**
-   * Get the next hint for a given peeked keyvalue
-   */
-  interface Hinter {
-    public abstract KeyValue getHint(KeyValue peek);
-  }
-
-  /**
-   * Entire family has been deleted, so either seek to the next family, or if none are present in
-   * the original set of families to include, seek to the "last possible key" (or rather our best
-   * guess) and be done.
-   */
-  class DeleteFamilyHinter implements Hinter {
-
-    @Override
-    public KeyValue getHint(KeyValue peeked) {
-      // check to see if we have another column to seek
-      ImmutableBytesPtr nextFamily =
-          getNextFamily(new ImmutableBytesPtr(peeked.getBuffer(), peeked.getFamilyOffset(),
-              peeked.getFamilyLength()));
-      if (nextFamily == null) {
-        // no known next family, so we can be completely done
-        done = true;
-        return KeyValue.LOWESTKEY;
-      }
-        // there is a valid family, so we should seek to that
-      return KeyValue.createFirstOnRow(peeked.getRow(), nextFamily.copyBytesIfNecessary(),
-        HConstants.EMPTY_BYTE_ARRAY);
-    }
-
-  }
-
-  /**
-   * Hint the next column-qualifier after the given keyvalue. We can't be smart like in the
-   * ScanQueryMatcher since we don't know the columns ahead of time.
-   */
-  class DeleteColumnHinter implements Hinter {
-
-    @Override
-    public KeyValue getHint(KeyValue kv) {
-      return KeyValue.createLastOnRow(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(),
-        kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getBuffer(),
-        kv.getQualifierOffset(), kv.getQualifierLength());
-    }
-  }
-
-  class DeleteTracker {
-
-    public KeyValue deleteFamily;
-    public KeyValue deleteColumn;
-    public KeyValue pointDelete;
-
-    public void reset() {
-      this.deleteFamily = null;
-      this.deleteColumn = null;
-      this.pointDelete = null;
-
-    }
-
-    /**
-     * Check to see if we should skip this {@link KeyValue} based on the family.
-     * <p>
-     * Internally, also resets the currently tracked "Delete Family" marker we are tracking if the
-     * keyvalue is in another family (since CFs sort lexicographically, we can discard the current
-     * marker since it must not be applicable to any more kvs in a linear scan).
-     * @param next keyvalue to check against the tracked delete family
-     * @return <tt>true</tt> if this {@link KeyValue} matches a delete.
-     */
-    public boolean matchesFamily(KeyValue next) {
-      if (deleteFamily == null) {
-        return false;
-      }
-      if (deleteFamily.matchingFamily(next)) {
-        // falls within the timestamp range
-        if (deleteFamily.getTimestamp() >= next.getTimestamp()) {
-          return true;
-        }
-      } else {
-        // only can reset the delete family because we are on to another family
-        deleteFamily = null;
-      }
-
-      return false;
-    }
-
-
-    /**
-     * @param next keyvalue to check against the tracked delete column
-     * @return <tt>true</tt> if the keyvalue is covered by the tracked delete column
-     */
-    public boolean matchesColumn(KeyValue next) {
-      if (deleteColumn == null) {
-        return false;
-      }
-      if (deleteColumn.matchingFamily(next) && deleteColumn.matchingQualifier(next)) {
-        // falls within the timestamp range
-        if (deleteColumn.getTimestamp() >= next.getTimestamp()) {
-          return true;
-        }
-      } else {
-        deleteColumn = null;
-      }
-      return false;
-    }
-
-    /**
-     * @param next keyvalue to check against the tracked point delete
-     * @return <tt>true</tt> if the keyvalue is covered by the tracked point delete
-     */
-    public boolean matchesPoint(KeyValue next) {
-      // point deletes only apply to the exact KV that they reference, so we only need to ensure
-      // that the timestamp matches exactly. Because we sort by timestamp first, either the next
-      // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
-      // one.
-      if (pointDelete != null && pointDelete.matchingFamily(next)
-          && pointDelete.matchingQualifier(next)) {
-        if (pointDelete.getTimestamp() == next.getTimestamp()) {
-          return true;
-        }
-        // clear the point delete since the TS must not be matching
-        coveringDelete.pointDelete = null;
-      }
-      return false;
-    }
-
-    /**
-     * @return <tt>true</tt> if no delete has been set
-     */
-    public boolean empty() {
-      return deleteFamily == null && deleteColumn == null && pointDelete == null;
-    }
-  }
-}
\ No newline at end of file
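
A rough standalone illustration of the point-delete behavior described in the class comment above: a
Delete at ts=5 covers the Put at ts=5 but not the older Put at ts=4 (row, family, and qualifier
names are arbitrary; this is not test code from the project):

    import java.util.Collections;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValue.Type;
    import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.index.covered.filter.ApplyAndFilterDeletesFilter;
    import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;

    public class PointDeleteFilterSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("r1");
        byte[] fam = Bytes.toBytes("fam");
        byte[] qual = Bytes.toBytes("qual");

        ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(
            Collections.singleton(new ImmutableBytesPtr(fam)));

        // the point delete itself is tracked and skipped
        ReturnCode r1 = filter.filterKeyValue(new KeyValue(row, fam, qual, 5L, Type.Delete));
        // the put at exactly ts=5 is covered by the point delete, so it is skipped too
        ReturnCode r2 = filter.filterKeyValue(new KeyValue(row, fam, qual, 5L, Bytes.toBytes("v5")));
        // the put at ts=4 is older than the point delete and is allowed through
        ReturnCode r3 = filter.filterKeyValue(new KeyValue(row, fam, qual, 4L, Bytes.toBytes("v4")));

        System.out.println(r1 + " " + r2 + " " + r3); // SKIP SKIP INCLUDE
      }
    }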

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java
deleted file mode 100644
index 2660b9f..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.FilterBase;
-
-import org.apache.hadoop.hbase.index.covered.update.ColumnTracker;
-
-/**
- * Similar to the {@link MaxTimestampFilter}, but also updates the 'next largest' timestamp seen
- * that is not skipped by the below criteria. Note that it isn't as quick as the
- * {@link MaxTimestampFilter} as we can't just seek ahead to a key with the matching timestamp, but
- * have to iterate each kv until we find the right one with an allowed timestamp.
- * <p>
- * Inclusively filter on the maximum timestamp allowed. Excludes all elements greater than (but not
- * equal to) the given timestamp, so given ts = 5, a {@link KeyValue} with ts 6 is excluded, but not
- * one with ts = 5.
- * <p>
- * This filter generally doesn't make sense on its own - it should follow a per-column filter and
- * possibly a per-delete filter to only track the most recent (but not exposed to the user)
- * timestamp.
- */
-public class ColumnTrackingNextLargestTimestampFilter extends FilterBase {
-
-  private long ts;
-  private ColumnTracker column;
-
-  public ColumnTrackingNextLargestTimestampFilter(long maxTime, ColumnTracker toTrack) {
-    this.ts = maxTime;
-    this.column = toTrack;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(KeyValue v) {
-    long timestamp = v.getTimestamp();
-    if (timestamp > ts) {
-      this.column.setTs(timestamp);
-      return ReturnCode.SKIP;
-    }
-    return ReturnCode.INCLUDE;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be serialized!");
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be deserialized!");
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 8591f88..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final WritableByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(KeyValue v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-        done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/MaxTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/MaxTimestampFilter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/MaxTimestampFilter.java
deleted file mode 100644
index 002f2ac..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/MaxTimestampFilter.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.FilterBase;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Inclusive filter on the maximum timestamp allowed. Excludes all elements greater than (but not
- * equal to) the given timestamp, so given ts = 5, a {@link KeyValue} with ts 6 is excluded, but not
- * one with ts = 5.
- */
-public class MaxTimestampFilter extends FilterBase {
-
-  private long ts;
-
-  public MaxTimestampFilter(long maxTime) {
-    this.ts = maxTime;
-  }
-
-  @Override
-  public KeyValue getNextKeyHint(KeyValue currentKV) {
-    // this might be a little excessive right now - better safe than sorry though, so we don't mess
-    // with other filters too much.
-    KeyValue kv = currentKV.deepCopy();
-    int offset = kv.getTimestampOffset();
-    // set the timestamp in the buffer
-    byte[] buffer = kv.getBuffer();
-    byte[] ts = Bytes.toBytes(this.ts);
-    System.arraycopy(ts, 0, buffer, offset, ts.length);
-
-    return kv;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(KeyValue v) {
-    long timestamp = v.getTimestamp();
-    if (timestamp > ts) {
-      return ReturnCode.SEEK_NEXT_USING_HINT;
-    }
-    return ReturnCode.INCLUDE;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be serialized!");
-
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    throw new UnsupportedOperationException("Server-side only filter, cannot be deserialized!");
-  }
-}
\ No newline at end of file
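
The inclusive boundary described in the comment above is easy to see in isolation - with a maximum
timestamp of 5, a keyvalue at ts=6 is seeked past while one at ts=5 is kept (a small illustrative
sketch, not project test code):

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.index.covered.filter.MaxTimestampFilter;

    public class MaxTimestampFilterSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("r1");
        byte[] fam = Bytes.toBytes("fam");
        byte[] qual = Bytes.toBytes("qual");

        // only keyvalues with ts <= 5 are visible
        MaxTimestampFilter filter = new MaxTimestampFilter(5L);

        ReturnCode newer = filter.filterKeyValue(new KeyValue(row, fam, qual, 6L, Bytes.toBytes("v6")));
        ReturnCode atMax = filter.filterKeyValue(new KeyValue(row, fam, qual, 5L, Bytes.toBytes("v5")));

        System.out.println(newer); // SEEK_NEXT_USING_HINT
        System.out.println(atMax); // INCLUDE
      }
    }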

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/NewerTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/NewerTimestampFilter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/NewerTimestampFilter.java
deleted file mode 100644
index ada2601..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/filter/NewerTimestampFilter.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.FilterBase;
-
-/**
- * Server-side only class used in the indexer to filter out keyvalues newer than a given timestamp
- * (so allows anything <code><=</code> timestamp through).
- * <p>
- * Note, <tt>this</tt> doesn't support {@link #write(DataOutput)} or {@link #readFields(DataInput)}.
- */
-public class NewerTimestampFilter extends FilterBase {
-
-  private long timestamp;
-
-  public NewerTimestampFilter(long timestamp) {
-    this.timestamp = timestamp;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(KeyValue ignored) {
-    return ignored.getTimestamp() > timestamp ? ReturnCode.SKIP : ReturnCode.INCLUDE;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    throw new UnsupportedOperationException("TimestampFilter is server-side only!");
-  }
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    throw new UnsupportedOperationException("TimestampFilter is server-side only!");
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnReference.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnReference.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnReference.java
deleted file mode 100644
index ca2b5e4..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnReference.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
-/**
- * A single column (family and optional qualifier) to track; {@link #ALL_QUALIFIERS} as the qualifier matches every qualifier in the family.
- */
-public class ColumnReference implements Comparable<ColumnReference> {
-    
-  public static final byte[] ALL_QUALIFIERS = new byte[0];
-  
-  private static int calcHashCode(byte[] family, byte[] qualifier) {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + Bytes.hashCode(family);
-    result = prime * result + Bytes.hashCode(qualifier);
-    return result;
-  }
-
-  private final int hashCode;
-  protected final byte[] family;
-  protected final byte[] qualifier;
-  private volatile ImmutableBytesWritable familyPtr;
-  private volatile ImmutableBytesWritable qualifierPtr;
-
-  public ColumnReference(byte[] family, byte[] qualifier) {
-    this.family = family;
-    this.qualifier = qualifier;
-    this.hashCode = calcHashCode(family, qualifier);
-  }
-
-  public byte[] getFamily() {
-    return this.family;
-  }
-
-  public byte[] getQualifier() {
-    return this.qualifier;
-  }
-  
-  public ImmutableBytesWritable getFamilyWritable() {
-    if (this.familyPtr == null) {
-      synchronized (this.family) {
-        if (this.familyPtr == null) {
-          this.familyPtr = new ImmutableBytesPtr(this.family);
-        }
-      }
-    }
-    return this.familyPtr;
-  }
-
-  public ImmutableBytesWritable getQualifierWritable() {
-    if (this.qualifierPtr == null) {
-      synchronized (this.qualifier) {
-        if (this.qualifierPtr == null) {
-          this.qualifierPtr = new ImmutableBytesPtr(this.qualifier);
-        }
-      }
-    }
-    return this.qualifierPtr;
-  }
-
-  public boolean matches(KeyValue kv) {
-    if (matchesFamily(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength())) {
-      return matchesQualifier(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength());
-    }
-    return false;
-  }
-
-  /**
-   * @param qual to check against
-   * @return <tt>true</tt> if this column covers the given qualifier.
-   */
-  public boolean matchesQualifier(byte[] qual) {
-    return matchesQualifier(qual, 0, qual.length);
-  }
-
-  public boolean matchesQualifier(byte[] bytes, int offset, int length) {
-    return allColumns() ? true : match(bytes, offset, length, qualifier);
-  }
-
-  /**
-   * @param family to check against
-   * @return <tt>true</tt> if this column covers the given family.
-   */
-  public boolean matchesFamily(byte[] family) {
-    return matchesFamily(family, 0, family.length);
-  }
-
-  public boolean matchesFamily(byte[] bytes, int offset, int length) {
-    return match(bytes, offset, length, family);
-  }
-
-  /**
-   * @return <tt>true</tt> if this should include all column qualifiers, <tt>false</tt> otherwise
-   */
-  public boolean allColumns() {
-    return this.qualifier == ALL_QUALIFIERS;
-  }
-
-  /**
-   * Check to see if the passed bytes match the stored bytes
-   * @param first bytes to check, may be <tt>null</tt>
-   * @param storedKey the stored byte[], should never be <tt>null</tt>
-   * @return <tt>true</tt> if they are byte-equal
-   */
-  private boolean match(byte[] first, int offset, int length, byte[] storedKey) {
-    return first == null ? false : Bytes.equals(first, offset, length, storedKey, 0,
-      storedKey.length);
-  }
-
-  public KeyValue getFirstKeyValueForRow(byte[] row) {
-    return KeyValue.createFirstOnRow(row, family, qualifier == ALL_QUALIFIERS ? null : qualifier);
-  }
-
-  @Override
-  public int compareTo(ColumnReference o) {
-    int c = Bytes.compareTo(family, o.family);
-    if (c == 0) {
-      // matching families, compare qualifiers
-      c = Bytes.compareTo(qualifier, o.qualifier);
-    }
-    return c;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o instanceof ColumnReference) {
-      ColumnReference other = (ColumnReference) o;
-      if (hashCode == other.hashCode && Bytes.equals(family, other.family)) {
-        return Bytes.equals(qualifier, other.qualifier);
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return hashCode;
-  }
-
-  @Override
-  public String toString() {
-    return "ColumnReference - " + Bytes.toString(family) + ":" + Bytes.toString(qualifier);
-  }
-}
\ No newline at end of file
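
A short sketch of the matching behaviour of the deleted ColumnReference, including the ALL_QUALIFIERS sentinel. The family and qualifier names are made up; imports use the pre-rename package shown in this diff.

import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnReferenceSketch {
  public static void main(String[] args) {
    // A reference to one specific column...
    ColumnReference ref = new ColumnReference(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
    System.out.println(ref.matchesFamily(Bytes.toBytes("cf")));        // true
    System.out.println(ref.matchesQualifier(Bytes.toBytes("other")));  // false

    // ...or to every qualifier in a family, via the ALL_QUALIFIERS sentinel.
    ColumnReference family = new ColumnReference(Bytes.toBytes("cf"), ColumnReference.ALL_QUALIFIERS);
    System.out.println(family.matchesQualifier(Bytes.toBytes("anything")));  // true
  }
}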

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnTracker.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnTracker.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnTracker.java
deleted file mode 100644
index 3b4d266..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/ColumnTracker.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-
-/**
- * Simple POJO for tracking a bunch of column references and the next-newest timestamp for those
- * columns
- * <p>
- * Two {@link ColumnTracker}s are considered equal if they track the same columns, even if their
- * timestamps are different.
- */
-public class ColumnTracker implements IndexedColumnGroup {
-
-  public static final long NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP = Long.MAX_VALUE;
-  public static final long GUARANTEED_NEWER_UPDATES = Long.MIN_VALUE;
-  private final List<ColumnReference> columns;
-  private long ts = NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP;
-  private final int hashCode;
-
-  private static int calcHashCode(List<ColumnReference> columns) {
-    return columns.hashCode();
-  }
-
-  public ColumnTracker(Collection<? extends ColumnReference> columns) {
-    this.columns = new ArrayList<ColumnReference>(columns);
-    // sort the columns
-    Collections.sort(this.columns);
-    this.hashCode = calcHashCode(this.columns);
-  }
-
-  /**
-   * Set the current timestamp, only if the passed timestamp is strictly less than the currently
-   * stored timestamp
-   * @param ts the timestamp to potentially store.
-   * @return the currently stored timestamp.
-   */
-  public long setTs(long ts) {
-    this.ts = this.ts > ts ? ts : this.ts;
-    return this.ts;
-  }
-
-  public long getTS() {
-    return this.ts;
-  }
-
-  @Override
-  public int hashCode() {
-    return hashCode;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof ColumnTracker)) {
-      return false;
-    }
-    ColumnTracker other = (ColumnTracker) o;
-    if (hashCode != other.hashCode) {
-      return false;
-    }
-    if (other.columns.size() != columns.size()) {
-      return false;
-    }
-
-    // check each column to see if they match
-    for (int i = 0; i < columns.size(); i++) {
-      if (!columns.get(i).equals(other.columns.get(i))) {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-  @Override
-  public List<ColumnReference> getColumns() {
-    return this.columns;
-  }
-
-  /**
-   * @return <tt>true</tt> if this set of columns has seen a column with a timestamp newer than the
-   *         requested timestamp, <tt>false</tt> otherwise.
-   */
-  public boolean hasNewerTimestamps() {
-    return !isNewestTime(this.ts);
-  }
-
-  /**
-   * @param ts timestamp to check
-   * @return <tt>true</tt> if the timestamp indicates there is no newer primary table entry for the column
-   */
-  public static boolean isNewestTime(long ts) {
-    return ts == NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP;
-  }
-}
\ No newline at end of file
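
As a sketch of the timestamp handling in the deleted ColumnTracker: setTs only ever lowers the stored value, so the tracker converges on the oldest "next newest" timestamp seen for its columns. The values are illustrative; imports use the pre-rename package shown in this diff.

import java.util.Arrays;

import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
import org.apache.hadoop.hbase.index.covered.update.ColumnTracker;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnTrackerSketch {
  public static void main(String[] args) {
    ColumnTracker tracker = new ColumnTracker(Arrays.asList(
        new ColumnReference(Bytes.toBytes("cf"), Bytes.toBytes("q1"))));

    tracker.setTs(100L);                                // stored timestamp drops to 100
    tracker.setTs(200L);                                // ignored, 200 > 100
    System.out.println(tracker.getTS());                // 100
    System.out.println(tracker.hasNewerTimestamps());   // true, since 100 != Long.MAX_VALUE
  }
}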

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexUpdateManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexUpdateManager.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexUpdateManager.java
deleted file mode 100644
index 173a2ea..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexUpdateManager.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-import com.google.common.collect.Lists;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
-/**
- * Keeps track of the index updates
- */
-public class IndexUpdateManager {
-
-  public Comparator<Mutation> COMPARATOR = new MutationComparator();
-  class MutationComparator implements Comparator<Mutation> {
-
-    @Override
-    public int compare(Mutation o1, Mutation o2) {
-      // always sort rows first
-      int compare = o1.compareTo(o2);
-      if (compare != 0) {
-        return compare;
-      }
-
-      // if same row, sort by reverse timestamp (larger first)
-      compare = Longs.compare(o2.getTimeStamp(), o1.getTimeStamp());
-      if (compare != 0) {
-        return compare;
-      }
-      // deletes always sort before puts for the same row
-      if (o1 instanceof Delete) {
-        // same row, same ts == same delete since we only delete rows
-        if (o2 instanceof Delete) {
-          return 0;
-        } else {
-          // o2 has to be a put
-          return -1;
-        }
-      }
-      // o1 must be a put
-      if (o2 instanceof Delete) {
-        return 1;
-      } else if (o2 instanceof Put) {
-        return comparePuts((Put) o1, (Put) o2);
-      }
-
-      throw new RuntimeException(
-          "Got unexpected mutation types! Can only be Put or Delete, but got: " + o1 + ", and "
-              + o2);
-    }
-
-    private int comparePuts(Put p1, Put p2) {
-      int p1Size = p1.size();
-      int p2Size = p2.size();
-      int compare = p1Size - p2Size;
-      if (compare == 0) {
-        // TODO: make this a real comparison
-        // this is a little cheating, but we don't really need to worry too much about this being
-        // the same - chances are that exact matches here are really the same update.
-        return Longs.compare(p1.heapSize(), p2.heapSize());
-      }
-      return compare;
-    }
-
-  }
-
-  private static final String PHOENIX_HBASE_TEMP_DELETE_MARKER = "phoenix.hbase.temp.delete.marker";
-  private static final byte[] TRUE_MARKER = new byte[] { 1 };
-
-  protected final Map<ImmutableBytesPtr, Collection<Mutation>> map =
-      new HashMap<ImmutableBytesPtr, Collection<Mutation>>();
-
-  /**
-   * Add an index update. Keeps the latest {@link Put} for a given timestamp.
-   * @param tableName name of the index table to which the update applies
-   * @param m the index update to add
-   */
-  public void addIndexUpdate(byte[] tableName, Mutation m) {
-    // we only keep the most recent update
-    ImmutableBytesPtr key = new ImmutableBytesPtr(tableName);
-    Collection<Mutation> updates = map.get(key);
-    if (updates == null) {
-      updates = new SortedCollection<Mutation>(COMPARATOR);
-      map.put(key, updates);
-    }
-    fixUpCurrentUpdates(updates, m);
-  }
-
-  /**
-   * Fix up the current updates, given the pending mutation.
-   * @param updates current updates
-   * @param pendingMutation mutation that is about to be added
-   */
-  protected void fixUpCurrentUpdates(Collection<Mutation> updates, Mutation pendingMutation) {
-    // need to check for each entry to see if we have a duplicate
-    Mutation toRemove = null;
-    Delete pendingDelete = pendingMutation instanceof Delete ? (Delete) pendingMutation : null;
-    boolean sawRowMatch = false;
-    for (Mutation stored : updates) {
-      int compare = pendingMutation.compareTo(stored);
-      // skip to the right row
-      if (compare < 0) {
-        continue;
-      } else if (compare > 0) {
-        if (sawRowMatch) {
-          break;
-        }
-        continue;
-      }
-
-      // note that we saw a row match, so any entry for a greater row is necessarily the wrong row
-      sawRowMatch = true;
-
-      // skip until we hit the right timestamp
-      if (stored.getTimeStamp() < pendingMutation.getTimeStamp()) {
-        continue;
-      }
-
-      if (stored instanceof Delete) {
-        // we already have a delete for this row, so we are done.
-        if (pendingDelete != null) {
-          return;
-        }
-        // pending update must be a Put, so we ignore the Put.
-        // add a marker in this delete that it has been canceled out already. We need to keep
-        // the delete around though so we can figure out if other Puts would also be canceled out.
-        markMutationForRemoval(stored);
-        return;
-      }
-
-      // otherwise, the stored mutation is a Put. Either way, we want to remove it. If the pending
-      // update is a delete, we need to remove the entry (no longer applies - covered by the
-      // delete), or it's an older version of the row, so we cover it with the newer.
-      toRemove = stored;
-      if (pendingDelete != null) {
-        // we still add the pending delete below so it can cancel out later puts, but mark it for removal so it is never written
-        markMutationForRemoval(pendingMutation);
-        break;
-      }
-    }
-    
-    updates.remove(toRemove);
-    updates.add(pendingMutation);
-  }
-
-  private void markMutationForRemoval(Mutation m) {
-    m.setAttribute(PHOENIX_HBASE_TEMP_DELETE_MARKER, TRUE_MARKER);
-  }
-
-  public List<Pair<Mutation, byte[]>> toMap() {
-    List<Pair<Mutation, byte[]>> updateMap = Lists.newArrayList();
-    for (Entry<ImmutableBytesPtr, Collection<Mutation>> updates : map.entrySet()) {
-      // get is ok because we always set with just the bytes
-      byte[] tableName = updates.getKey().get();
-      // TODO replace this as just storing a byte[], to avoid all the String <-> byte[] swapping
-      // HBase does
-      for (Mutation m : updates.getValue()) {
-        // skip elements that have been marked for delete
-        if (shouldBeRemoved(m)) {
-          continue;
-        }
-        updateMap.add(new Pair<Mutation, byte[]>(m, tableName));
-      }
-    }
-    return updateMap;
-  }
-
-  /**
-   * @param updates index updates to add, each paired with the name of its target index table
-   */
-  public void addAll(Collection<Pair<Mutation, String>> updates) {
-    for (Pair<Mutation, String> update : updates) {
-      addIndexUpdate(Bytes.toBytes(update.getSecond()), update.getFirst());
-    }
-  }
-
-  private boolean shouldBeRemoved(Mutation m) {
-    return m.getAttribute(PHOENIX_HBASE_TEMP_DELETE_MARKER) != null;
-  }
-
-  @Override
-  public String toString() {
-    StringBuffer sb = new StringBuffer("Pending Index Updates:\n");
-    for (Entry<ImmutableBytesPtr, Collection<Mutation>> entry : map.entrySet()) {
-      String tableName = Bytes.toString(entry.getKey().get());
-      sb.append("   Table: '" + tableName + "'\n");
-      for (Mutation m : entry.getValue()) {
-        sb.append("\t");
-        if (shouldBeRemoved(m)) {
-          sb.append("[REMOVED]");
-        }
-        sb.append(m.getClass().getSimpleName() + ":"
-            + ((m instanceof Put) ? m.getTimeStamp() + " " : ""));
-        sb.append(" row=" + Bytes.toString(m.getRow()));
-        sb.append("\n");
-        if (m.getFamilyMap().isEmpty()) {
-          sb.append("\t\t=== EMPTY ===\n");
-        }
-        for (List<KeyValue> kvs : m.getFamilyMap().values()) {
-          for (KeyValue kv : kvs) {
-            sb.append("\t\t" + kv.toString() + "/value=" + Bytes.toStringBinary(kv.getValue()));
-            sb.append("\n");
-          }
-        }
-      }
-    }
-    return sb.toString();
-  }
-}
\ No newline at end of file
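
A hedged sketch of the cancellation behaviour implemented by fixUpCurrentUpdates above: a pending Delete at the same timestamp removes the stored Put for that index row and is itself marked, so nothing reaches the final update list. The table and row names are invented; the calls are the HBase 0.94-era client API and the import uses the pre-rename package shown in this diff.

import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.index.covered.update.IndexUpdateManager;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class IndexUpdateManagerSketch {
  public static void main(String[] args) {
    byte[] indexTable = Bytes.toBytes("MY_INDEX");  // hypothetical index table name
    byte[] row = Bytes.toBytes("idx-row");

    IndexUpdateManager manager = new IndexUpdateManager();

    // Pending index row write at ts=10...
    Put put = new Put(row, 10L);
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), 10L, Bytes.toBytes("v"));
    manager.addIndexUpdate(indexTable, put);

    // ...cancelled by a delete of the same row at the same timestamp.
    Delete delete = new Delete(row);
    delete.setTimestamp(10L);
    manager.addIndexUpdate(indexTable, delete);

    // The put was removed and the delete is filtered out, so there is nothing to write.
    List<Pair<Mutation, byte[]>> updates = manager.toMap();
    System.out.println(updates.size());  // 0
  }
}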

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexedColumnGroup.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexedColumnGroup.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexedColumnGroup.java
deleted file mode 100644
index 76128d3..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/IndexedColumnGroup.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import java.util.List;
-
-/**
- * Group of columns that were requested to build an index
- */
-public interface IndexedColumnGroup {
-
-  public List<ColumnReference> getColumns();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/SortedCollection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/SortedCollection.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/SortedCollection.java
deleted file mode 100644
index d76646d..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/update/SortedCollection.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
-import com.google.common.collect.Iterators;
-
-/**
- * A collection whose elements are stored and returned sorted.
- * <p>
- * We can't just use something like a {@link PriorityQueue} because it doesn't return the
- * underlying values in sorted order.
- * @param <T> the type of element stored in the collection
- */
-class SortedCollection<T> implements Collection<T>, Iterable<T> {
-
-  private PriorityQueue<T> queue;
-  private Comparator<T> comparator;
-
-  /**
-   * Use the given comparator to compare all keys for sorting
-   * @param comparator comparator used to order the elements
-   */
-  public SortedCollection(Comparator<T> comparator) {
-    this.queue = new PriorityQueue<T>(1, comparator);
-    this.comparator = comparator;
-  }
-  
-  /**
-   * All passed elements are expected to be {@link Comparable}
-   */
-  public SortedCollection() {
-    this.queue = new PriorityQueue<T>();
-  }
-  
-  @Override
-  public int size() {
-    return this.queue.size();
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return this.queue.isEmpty();
-  }
-
-  @Override
-  public boolean contains(Object o) {
-    return this.queue.contains(o);
-  }
-
-  @Override
-  public Iterator<T> iterator() {
-    @SuppressWarnings("unchecked")
-    T[] array = (T[]) this.queue.toArray();
-    if (this.comparator == null) {
-      Arrays.sort(array);
-    } else {
-      Arrays.sort(array, this.comparator);
-    }
-    return Iterators.forArray(array);
-  }
-
-  @Override
-  public Object[] toArray() {
-    return this.queue.toArray();
-  }
-
-  @SuppressWarnings("hiding")
-  @Override
-  public <T> T[] toArray(T[] a) {
-    return this.queue.toArray(a);
-  }
-
-  @Override
-  public boolean add(T e) {
-    return this.queue.add(e);
-  }
-
-  @Override
-  public boolean remove(Object o) {
-    return this.queue.remove(o);
-  }
-
-  @Override
-  public boolean containsAll(Collection<?> c) {
-    return this.queue.containsAll(c);
-  }
-
-  @Override
-  public boolean addAll(Collection<? extends T> c) {
-    return this.queue.addAll(c);
-  }
-
-  @Override
-  public boolean removeAll(Collection<?> c) {
-    return queue.removeAll(c);
-  }
-
-  @Override
-  public boolean retainAll(Collection<?> c) {
-    return this.queue.retainAll(c);
-  }
-
-  @Override
-  public void clear() {
-    this.queue.clear();
-  }
-}
\ No newline at end of file
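
A small sketch of the iteration contract of the deleted SortedCollection: elements come back in comparator order on every pass, at the cost of a copy-and-sort per iterator() call. The class is package-private, so the sketch assumes it lives in the same (pre-rename) package; the element values are arbitrary.

package org.apache.hadoop.hbase.index.covered.update;

import java.util.Collections;
import java.util.Comparator;

public class SortedCollectionSketch {
  public static void main(String[] args) {
    Comparator<String> reverse = Collections.reverseOrder();
    SortedCollection<String> names = new SortedCollection<String>(reverse);
    names.add("b");
    names.add("a");
    names.add("c");
    for (String name : names) {
      System.out.println(name);  // prints c, b, a
    }
  }
}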

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/IndexWriteException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/IndexWriteException.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/IndexWriteException.java
deleted file mode 100644
index 45045cc..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/IndexWriteException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.exception;
-
-import org.apache.hadoop.hbase.HBaseIOException;
-
-/**
- * Generic {@link Exception} indicating that an index write has failed
- */
-@SuppressWarnings("serial")
-public class IndexWriteException extends HBaseIOException {
-
-  public IndexWriteException() {
-    super();
-  }
-
-  public IndexWriteException(String message, Throwable cause) {
-    super(message, cause);
-  }
-
-  public IndexWriteException(String message) {
-    super(message);
-  }
-
-  public IndexWriteException(Throwable cause) {
-    super(cause);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/MultiIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/MultiIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/MultiIndexWriteFailureException.java
deleted file mode 100644
index cfc55c7..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/MultiIndexWriteFailureException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.exception;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.index.table.HTableInterfaceReference;
-
-/**
- * Indicates a failure to write to multiple index tables.
- */
-@SuppressWarnings("serial")
-public class MultiIndexWriteFailureException extends IndexWriteException {
-
-  private List<HTableInterfaceReference> failures;
-
-  /**
-   * @param failures the tables to which the index write did not succeed
-   */
-  public MultiIndexWriteFailureException(List<HTableInterfaceReference> failures) {
-    super("Failed to write to multiple index tables");
-    this.failures = failures;
-
-  }
-
-  public List<HTableInterfaceReference> getFailedTables() {
-    return this.failures;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/SingleIndexWriteFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/SingleIndexWriteFailureException.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/SingleIndexWriteFailureException.java
deleted file mode 100644
index 76b9eb8..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/exception/SingleIndexWriteFailureException.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.exception;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Mutation;
-
-/**
- * Exception thrown if we cannot successfully write to an index table.
- */
-@SuppressWarnings("serial")
-public class SingleIndexWriteFailureException extends IndexWriteException {
-
-  private String table;
-
-  /**
-   * Cannot reach the index, but not sure of the table or the mutations that caused the failure
-   * @param msg more description of what happened
-   * @param cause original cause
-   */
-  public SingleIndexWriteFailureException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-
-  /**
-   * Failed to write the passed mutations to an index table for some reason.
-   * @param targetTableName index table to which we attempted to write
-   * @param mutations mutations that were attempted
-   * @param cause underlying reason for the failure
-   */
-  public SingleIndexWriteFailureException(String targetTableName, List<Mutation> mutations,
-      Exception cause) {
-    super("Failed to make index update:\n\t table: " + targetTableName + "\n\t edits: " + mutations
-        + "\n\tcause: " + (cause == null ? "UNKNOWN" : cause.getMessage()), cause);
-    this.table = targetTableName;
-  }
-
-  /**
-   * @return The table to which we failed to write the index updates. If unknown, returns
-   *         <tt>null</tt>
-   */
-  public String getTableName() {
-    return this.table;
-  }
-}
\ No newline at end of file
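
A brief sketch of how this exception is typically raised around a failed index write. The table name, mutation list, and htable variable are invented for illustration; the constructor used is the one defined above and the import uses the pre-rename package shown in this diff.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.index.exception.SingleIndexWriteFailureException;

public class SingleIndexWriteFailureSketch {
  public static void writeIndexUpdates(HTableInterface htable, List<Mutation> mutations)
      throws IOException {
    try {
      htable.batch(mutations);
    } catch (Exception e) {
      // wrap the underlying failure with the index table and the edits we tried to write
      throw new SingleIndexWriteFailureException("MY_INDEX", mutations, e);
    }
  }
}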