Posted to commits@phoenix.apache.org by mu...@apache.org on 2014/02/15 01:07:38 UTC

[05/15] Rename package from org.apache.hadoop.hbase.index.* to org.apache.phoenix.index.* to fix classloader issue causing mutable index performance regression - https://issues.apache.org/jira/browse/PHOENIX-38

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
deleted file mode 100644
index 0f2ed37..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredColumnIndexCodec.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.index.covered.IndexCodec;
-import org.apache.hadoop.hbase.index.covered.IndexUpdate;
-import org.apache.hadoop.hbase.index.covered.LocalTableState;
-import org.apache.hadoop.hbase.index.covered.data.LocalHBaseState;
-import org.apache.hadoop.hbase.index.covered.example.CoveredColumnIndexCodec.ColumnEntry;
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-
-public class TestCoveredColumnIndexCodec {
-  private static final byte[] PK = new byte[] { 'a' };
-  private static final String FAMILY_STRING = "family";
-  private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STRING);
-  private static final byte[] QUAL = Bytes.toBytes("qual");
-  private static final CoveredColumn COLUMN_REF = new CoveredColumn(FAMILY_STRING, QUAL);
-  private static final byte[] EMPTY_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0,
-    Arrays.asList(toColumnEntry(new byte[0])));
-  private static final byte[] BLANK_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0,
-    Collections.<ColumnEntry> emptyList());
-
-  private static ColumnEntry toColumnEntry(byte[] bytes) {
-    return new ColumnEntry(bytes, COLUMN_REF);
-  }
-
-  /**
-   * Convert between an index and a bunch of values
-   * @throws Exception
-   */
-  @Test
-  public void toFromIndexKey() throws Exception {
-    // start with empty values
-    byte[] indexKey = BLANK_INDEX_KEY;
-    List<byte[]> stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Found some stored values in an index row key that wasn't created with values!",
-      0, stored.size());
-
-    // a single, empty value
-    indexKey = EMPTY_INDEX_KEY;
-    stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Found some stored values in an index row key that wasn't created with values!",
-      1, stored.size());
-    assertEquals("Found a non-zero length value: " + Bytes.toString(stored.get(0)), 0,
-      stored.get(0).length);
-
-    // try with a couple values, some different lengths
-    byte[] v1 = new byte[] { 'a' };
-    byte[] v2 = new byte[] { 'b' };
-    byte[] v3 = Bytes.toBytes("v3");
-    int len = v1.length + v2.length + v3.length;
-    indexKey =
-        CoveredColumnIndexCodec.composeRowKey(PK, len,
-          Arrays.asList(toColumnEntry(v1), toColumnEntry(v2), toColumnEntry(v3)));
-    stored = CoveredColumnIndexCodec.getValues(indexKey);
-    assertEquals("Didn't find expected number of values in index key!", 3, stored.size());
-    assertTrue("First index keys don't match!", Bytes.equals(v1, stored.get(0)));
-    assertTrue("Second index keys don't match!", Bytes.equals(v2, stored.get(1)));
-    assertTrue("Third index keys don't match!", Bytes.equals(v3, stored.get(2)));
-  }
-
-  /**
-   * Ensure that we can correctly determine when a row key is empty (no values).
-   */
-  @Test
-  public void testCheckRowKeyForAllNulls() {
-    byte[] pk = new byte[] { 'a', 'b', 'z' };
-    // check positive cases first
-    byte[] result = EMPTY_INDEX_KEY;
-    assertTrue("Didn't correctly read single element as being null in row key",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 0,
-          Lists.newArrayList(toColumnEntry(new byte[0]), toColumnEntry(new byte[0])));
-    assertTrue("Didn't correctly read two elements as being null in row key",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-
-    // check cases where it isn't null
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 2,
-          Arrays.asList(toColumnEntry(new byte[] { 1, 2 })));
-    assertFalse("Found a null key, when it wasn't!",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-    result =
-        CoveredColumnIndexCodec.composeRowKey(pk, 2,
-          Arrays.asList(toColumnEntry(new byte[] { 1, 2 }), toColumnEntry(new byte[0])));
-    assertFalse("Found a null key, when it wasn't!",
-      CoveredColumnIndexCodec.checkRowKeyForAllNulls(result));
-  }
-
-  private static class SimpleTableState implements LocalHBaseState {
-
-    private Result r;
-
-    public SimpleTableState(Result r) {
-      this.r = r;
-    }
-
-    @Override
-    public Result getCurrentRowState(Mutation m, Collection<? extends ColumnReference> toCover)
-        throws IOException {
-      return r;
-    }
-
-  }
-
-  /**
-   * Test that we get back the correct index updates for a given column group
-   * @throws Exception on failure
-   */
-  @Test
-  public void testGeneratedIndexUpdates() throws Exception {
-    ColumnGroup group = new ColumnGroup("test-column-group");
-    group.add(COLUMN_REF);
-
-    final Result emptyState = new Result(Collections.<KeyValue> emptyList());
-    
-    // setup the state we expect for the codec
-    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
-    Configuration conf = new Configuration(false);
-    Mockito.when(env.getConfiguration()).thenReturn(conf);
-    LocalHBaseState table = new SimpleTableState(emptyState);
-
-    // make a new codec on those kvs
-    CoveredColumnIndexCodec codec =
-        CoveredColumnIndexCodec.getCodecForTesting(Arrays.asList(group));
-
-    // start with a basic put that has some keyvalues
-    Put p = new Put(PK);
-    // setup the kvs to add
-    List<KeyValue> kvs = new ArrayList<KeyValue>();
-    byte[] v1 = Bytes.toBytes("v1");
-    KeyValue kv = new KeyValue(PK, FAMILY, QUAL, 1, v1);
-    kvs.add(kv);
-    p.add(kv);
-    byte[] v2 = Bytes.toBytes("v2");
-    kv = new KeyValue(PK, Bytes.toBytes("family2"), QUAL, 1, v2);
-    kvs.add(kv);
-    p.add(kv);
-
-    // check the codec for deletes it should send
-    LocalTableState state = new LocalTableState(env, table, p);
-    Iterable<IndexUpdate> updates = codec.getIndexDeletes(state);
-    assertFalse("Found index updates without any existing kvs in table!", updates.iterator().next()
-        .isValid());
-
-    // get the updates with the pending update
-    state.setCurrentTimestamp(1);
-    state.addPendingUpdates(kvs);
-    updates = codec.getIndexUpserts(state);
-    assertTrue("Didn't find index updates for pending primary table update!", updates.iterator()
-        .hasNext());
-    for (IndexUpdate update : updates) {
-      assertTrue("Update marked as invalid, but should be a pending index write!", update.isValid());
-      Put m = (Put) update.getUpdate();
-      // should just be the single update for the column reference
-      byte[] expected =
-          CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
-      assertArrayEquals("Didn't get expected index value", expected, m.getRow());
-    }
-
-    // then apply a delete
-    Delete d = new Delete(PK, 2);
-    // need to set the timestamp here, as would actually happen on the server, unlike what happens
-    // with puts, where methods that don't specify a timestamp fall back to the constructor-specified one.
-    d.deleteFamily(FAMILY, 2);
-    // setup the next batch of 'current state', basically just ripping out the current state from
-    // the last round
-    table = new SimpleTableState(new Result(kvs));
-    state = new LocalTableState(env, table, d);
-    state.setCurrentTimestamp(2);
-    // check the cleanup of the current table, after the puts (mocking a 'next' update)
-    updates = codec.getIndexDeletes(state);
-    for (IndexUpdate update : updates) {
-      assertTrue("Didn't have any index cleanup, even though there is current state",
-        update.isValid());
-      Delete m = (Delete) update.getUpdate();
-      // should just be the single update for the column reference
-      byte[] expected =
-          CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
-      assertArrayEquals("Didn't get expected index value", expected, m.getRow());
-    }
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-
-    // now with the delete of the columns
-    d = new Delete(PK, 2);
-    d.deleteColumns(FAMILY, QUAL, 2);
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-
-    // this delete needs to match timestamps exactly, by contract, to have any effect
-    d = new Delete(PK, 1);
-    d.deleteColumn(FAMILY, QUAL, 1);
-    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
-  }
-
-  private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List<KeyValue> currentState,
-      Delete d) throws IOException {
-    LocalHBaseState table = new SimpleTableState(new Result(currentState));
-    LocalTableState state = new LocalTableState(env, table, d);
-    state.setCurrentTimestamp(d.getTimeStamp());
-    // now we shouldn't see anything when getting the index update
-    state.addPendingUpdates(d.getFamilyMap().get(FAMILY));
-    Iterable<IndexUpdate> updates = codec.getIndexUpserts(state);
-    for (IndexUpdate update : updates) {
-      assertFalse("Had some index updates, though it should have been covered by the delete",
-        update.isValid());
-    }
-  }
-}
\ No newline at end of file
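
The deleted TestCoveredColumnIndexCodec above exercised a round trip between covered values and
the index row key (composeRowKey / getValues). Below is a minimal, self-contained sketch of the
same idea; the class name and the length-prefixed layout are hypothetical stand-ins for
illustration, not the actual CoveredColumnIndexCodec encoding.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class IndexKeyRoundTripSketch {

  // Append the value count, then each value as [4-byte length][bytes], after the primary key.
  static byte[] composeRowKey(byte[] pk, List<byte[]> values) {
    int total = pk.length + 4;
    for (byte[] v : values) {
      total += 4 + v.length;
    }
    ByteBuffer buf = ByteBuffer.allocate(total);
    buf.put(pk).putInt(values.size());
    for (byte[] v : values) {
      buf.putInt(v.length).put(v);
    }
    return buf.array();
  }

  // Recover the stored values, skipping over the primary-key prefix.
  static List<byte[]> getValues(byte[] rowKey, int pkLength) {
    ByteBuffer buf = ByteBuffer.wrap(rowKey);
    buf.position(pkLength);
    int count = buf.getInt();
    List<byte[]> values = new ArrayList<byte[]>(count);
    for (int i = 0; i < count; i++) {
      byte[] v = new byte[buf.getInt()];
      buf.get(v);
      values.add(v);
    }
    return values;
  }

  public static void main(String[] args) {
    byte[] pk = new byte[] { 'a' };
    List<byte[]> in = Arrays.asList(new byte[] { 'a' }, new byte[] { 'b' }, "v3".getBytes());
    List<byte[]> out = getValues(composeRowKey(pk, in), pk.length);
    System.out.println(out.size() == 3 && Arrays.equals(in.get(2), out.get(2))); // prints true
  }
}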

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
deleted file mode 100644
index ae06f3b..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestCoveredIndexSpecifierBuilder.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-public class TestCoveredIndexSpecifierBuilder {
-  private static final String FAMILY = "FAMILY";
-  private static final String FAMILY2 = "FAMILY2";
-  private static final String INDEX_TABLE = "INDEX_TABLE";
-  private static final String INDEX_TABLE2 = "INDEX_TABLE2";
-
-
-  @Test
-  public void testSimpleSerialziationDeserialization() throws Exception {
-    byte[] indexed_qualifer = Bytes.toBytes("indexed_qual");
-
-    //setup the index 
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE);
-    // match a single family:qualifier pair
-    CoveredColumn col1 = new CoveredColumn(FAMILY, indexed_qualifer);
-    fam1.add(col1);
-    // matches the family2:* columns
-    CoveredColumn col2 = new CoveredColumn(FAMILY2, null);
-    fam1.add(col2);
-    builder.addIndexGroup(fam1);
-    ColumnGroup fam2 = new ColumnGroup(INDEX_TABLE2);
-    // match a single family2:qualifier pair
-    CoveredColumn col3 = new CoveredColumn(FAMILY2, indexed_qualifer);
-    fam2.add(col3);
-    builder.addIndexGroup(fam2);
-    
-    Configuration conf = new Configuration(false);
-    //convert the map that HTableDescriptor gets into the conf the coprocessor receives
-    Map<String, String> map = builder.convertToMap();
-    for(Entry<String, String> entry: map.entrySet()){
-      conf.set(entry.getKey(), entry.getValue());
-    }
-
-    List<ColumnGroup> columns = CoveredColumnIndexSpecifierBuilder.getColumns(conf);
-    assertEquals("Didn't deserialize the expected number of column groups", 2, columns.size());
-    ColumnGroup group = columns.get(0);
-    assertEquals("Didn't deserialize expected column in first group", col1, group.getColumnForTesting(0));
-    assertEquals("Didn't deserialize expected column in first group", col2, group.getColumnForTesting(1));
-    group = columns.get(1);
-    assertEquals("Didn't deserialize expected column in second group", col3, group.getColumnForTesting(0));
-  }
-}
\ No newline at end of file
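
The deleted TestCoveredIndexSpecifierBuilder above verified that index group definitions survive
the trip from an HTableDescriptor-style String map into a Configuration and back (convertToMap /
getColumns). A minimal sketch of that pattern follows; the key names and the family:qualifier
string encoding are hypothetical, not the real CoveredColumnIndexSpecifierBuilder format.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class IndexSpecSerializationSketch {

  private static final String COUNT_KEY = "example.index.group.count"; // hypothetical key

  // Flatten each group (here just a comma-separated list of family:qualifier specs) into a map,
  // the same way the builder hands its settings to the table descriptor.
  static Map<String, String> toMap(List<String> groups) {
    Map<String, String> map = new HashMap<String, String>();
    map.put(COUNT_KEY, Integer.toString(groups.size()));
    for (int i = 0; i < groups.size(); i++) {
      map.put("example.index.group." + i, groups.get(i));
    }
    return map;
  }

  // Rebuild the groups from a Configuration that was populated from that map (coprocessor side).
  static List<String> fromConf(Configuration conf) {
    int count = conf.getInt(COUNT_KEY, 0);
    List<String> groups = new ArrayList<String>(count);
    for (int i = 0; i < count; i++) {
      groups.add(conf.get("example.index.group." + i));
    }
    return groups;
  }

  public static void main(String[] args) {
    List<String> groups = new ArrayList<String>();
    groups.add("FAMILY:indexed_qual,FAMILY2:*");
    groups.add("FAMILY2:indexed_qual");

    Configuration conf = new Configuration(false);
    for (Map.Entry<String, String> e : toMap(groups).entrySet()) {
      conf.set(e.getKey(), e.getValue());
    }
    System.out.println(fromConf(conf)); // round-trips the two group definitions
  }
}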

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
deleted file mode 100644
index 8107ff2..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndToEndCoveredIndexing.java
+++ /dev/null
@@ -1,877 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.TableName;
-
-/**
- * Test Covered Column indexing in an 'end-to-end' manner on a minicluster. This covers cases where
- * we manage custom timestamped updates that arrive in and out of order as well as just using the
- * generically timestamped updates.
- */
-public class TestEndToEndCoveredIndexing {
-  private static final Log LOG = LogFactory.getLog(TestEndToEndCoveredIndexing.class);
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final String FAM_STRING = "FAMILY";
-  private static final byte[] FAM = Bytes.toBytes(FAM_STRING);
-  private static final String FAM2_STRING = "FAMILY2";
-  private static final byte[] FAM2 = Bytes.toBytes(FAM2_STRING);
-  private static final byte[] EMPTY_BYTES = new byte[0];
-  private static final byte[] indexed_qualifer = Bytes.toBytes("indexed_qual");
-  private static final byte[] regular_qualifer = Bytes.toBytes("reg_qual");
-  private static final byte[] row1 = Bytes.toBytes("row1");
-  private static final byte[] value1 = Bytes.toBytes("val1");
-  private static final byte[] value2 = Bytes.toBytes("val2");
-  private static final byte[] value3 = Bytes.toBytes("val3");
-  // match a single family:qualifier pair
-  private static final CoveredColumn col1 = new CoveredColumn(FAM_STRING, indexed_qualifer);
-  // matches the family2:* columns
-  private static final CoveredColumn col2 = new CoveredColumn(FAM2_STRING, null);
-  private static final CoveredColumn col3 = new CoveredColumn(FAM2_STRING, indexed_qualifer);
-  
-  @Rule
-  public TableName TestTable = new TableName();
-  
-  private ColumnGroup fam1;
-  private ColumnGroup fam2;
-
-  // setup a couple of index columns
-  private void setupColumns() {
-    fam1 = new ColumnGroup(getIndexTableName());
-    fam2 = new ColumnGroup(getIndexTableName() + "2");
-    // values are [col1][col2_1]...[col2_n]
-    fam1.add(col1);
-    fam1.add(col2);
-    // value is [col2]
-    fam2.add(col3);
-  }
-
-  private String getIndexTableName() {
-    return Bytes.toString(TestTable.getTableName()) + "_index";
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be SNAPSHOT versions).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    setupColumns();
-  }
-
-  /**
-   * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
-   * entries as expected
-   * @throws Exception on failure
-   */
-  @Test
-  public void testSimpleTimestampedUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-
-    // verify that the index matches
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Test that the multiple timestamps in a single put build the correct index updates.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleTimestampsInSinglePut() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10;
-    long ts2 = 11;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM, regular_qualifer, ts1, value2);
-    // our group indexes all columns in this family, so any qualifier here is ok
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts1
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // check the second entry at ts2
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Test that we make updates to multiple {@link ColumnGroup}s across a single put/delete 
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleConcurrentGroupsUpdated() throws Exception {
-    HTable primary = createSetupTables(fam1, fam2);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    p.add(FAM2, indexed_qualifer, ts, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    HTable index2 = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // and check the second index as well
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index2, expected, ts, value3);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1, index2);
-  }
-
-  /**
-   * HBase has a 'fun' property wherein you can completely clobber an existing row if you make a
-   * {@link Put} at the exact same dimension (row, cf, cq, ts) as an existing row. The old row
-   * disappears and the new value (since the rest of the row is the same) completely subsumes it.
-   * This test ensures that we remove the old entry and put a new entry in its place.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testOverwritingPutsCorrectlyGetIndexed() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 10;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // now overwrite the put in the primary table with a new value
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value3);
-    // and verify that a scan at the first entry returns nothing (ignore the updated row)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts,
-      value1, value2);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-  
-  @Test
-  public void testSimpleDeletes() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a simple Put
-    long ts = 10;
-    Put p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts, value1);
-    p.add(FAM, regular_qualifer, ts, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    Delete d = new Delete(row1);
-    primary.delete(d);
-
-    HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    List<KeyValue> expected = Collections.<KeyValue> emptyList();
-    // scan over all time should cause the delete to be covered
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
-      HConstants.EMPTY_END_ROW);
-
-    // scan at the older timestamp should still show the older value
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(index, primary);
-  }
-
-  /**
-   * If we don't have any updates to make to the index, we don't take a lock on the WAL. However, we
-   * need to make sure that we don't try to unlock the WAL at write time when we don't write
-   * anything, since that would cause a java.lang.IllegalMonitorStateException.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testDeletesWithoutPreviousState() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a delete on the primary table (no data, so no index updates...hopefully).
-    long ts = 10;
-    Delete d = new Delete(row1);
-    primary.delete(d);
-
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-    List<KeyValue> expected = Collections.<KeyValue> emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // a delete of a specific family/column should also not show any index updates
-    d = new Delete(row1);
-    d.deleteColumn(FAM, indexed_qualifer);
-    primary.delete(d);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // also just a family marker should have the same effect
-    d = new Delete(row1);
-    d.deleteFamily(FAM);
-    primary.delete(d);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // also just a family marker should have the same effect
-    d = new Delete(row1);
-    d.deleteColumns(FAM, indexed_qualifer);
-    primary.delete(d);
-    primary.flushCommits();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same behavior with deletes.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testMultipleTimestampsInSingleDelete() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10, ts2 = 11, ts3 = 12;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    // our group indexes all columns in this family, so any qualifier here is ok
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // check to make sure everything we expect is there
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // ts1, we just have v1
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // at ts2, don't have the above anymore
-    pairs.clear();
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);
-    // but we do have the new entry at ts2
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // now build up a delete with a couple different timestamps
-    Delete d = new Delete(row1);
-    // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
-    d.deleteColumn(FAM, indexed_qualifer, ts1);
-    // since this doesn't match exactly, we actually shouldn't see a change in table state
-    d.deleteColumn(FAM2, regular_qualifer, ts3);
-    primary.delete(d);
-
-    // at ts1, we should have the put covered exactly by the delete and into the entire future
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
-      value1);
-
-    // at ts2, we should just see value3
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // the later delete is a point delete, so we shouldn't see any change at ts3
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
-      HConstants.EMPTY_END_ROW);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
-   * given time. If it's modifying the latest state, we don't need to do anything but add deletes. If
-   * it's modifying back-in-time state, we just need to fix up the surrounding elements, as anything
-   * else ahead of it will be fixed up by later updates.
-   * <p>
-   * similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testDeleteColumnsInThePast() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 10, ts2 = 11, ts3 = 12;
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM2, regular_qualifer, ts2, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    // now build up a delete with a couple different timestamps
-    Delete d = new Delete(row1);
-    // these deletes don't need to match the exact ts because they cover everything earlier
-    d.deleteColumns(FAM, indexed_qualifer, ts2);
-    d.deleteColumns(FAM2, regular_qualifer, ts3);
-    primary.delete(d);
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts1
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);
-
-    // delete at ts2 changes what the put would insert
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);
-
-    // final delete clears out everything
-    expected = Collections.emptyList();
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-  
-  /**
-   * If the client is using custom timestamps, it is possible that the updates come out of order (i.e.
-   * the update to ts 10 comes after the update to ts 12). In that case, we need to be sure that the
-   * index is correctly updated when the out of order put arrives.
-   * @throws Exception
-   */
-  @Test
-  public void testOutOfOrderUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts = 12;
-    p.add(FAM, indexed_qualifer, ts, value1);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check the first entry at ts
-    List<KeyValue> expectedTs1 = CoveredColumnIndexCodec
-        .getIndexKeyValueForTesting(row1, ts, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-    // now make a put back in time
-    long ts2 = ts - 2;
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-
-    // check to make sure the back in time entry exists
-    List<KeyValue> expectedTs2 = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2,
-      pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs2, ts2, value2);
-    // then it should be gone at the newer ts (because it deletes itself)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts2,
-      ts + 1, value2, HConstants.EMPTY_END_ROW);
-
-    // but that the original index entry is still visible at ts, just fine
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expectedTs1, ts, value1);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * It's possible (e.g. with a fast, frequently writing client) that more versions than the
-   * 'visible' number are put in a row before a client makes a put 'back in time' on that row. If
-   * we don't scan the current table properly, we won't see an index update for that 'back in time'
-   * update since the usual lookup will only see the regular number of versions. This ability to see
-   * back in time depends on running HBase 0.94.9
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExceedVersionsOutOfOrderPut() throws Exception {
-    // setup the index
-    HTable primary = createSetupTables(fam2);
-
-    // do a put to the primary table
-    Put p = new Put(row1);
-    long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5;
-    byte[] value4 = Bytes.toBytes("val4");
-    byte[] value5 = Bytes.toBytes("val5");
-    p.add(FAM2, indexed_qualifer, ts1, value1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts3, value3);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts4, value4);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts5, value5);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      // the whole table, all the keys
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    /*
-     * now we have definitely exceeded the number of versions visible to a usual client of the
-     * primary table, so we should try doing a put 'back in time' and make sure that it has the correct
-     * index values and cleanup
-     */
-    p = new Put(row1);
-    p.add(FAM2, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // // read the index for the expected values
-    // HTable index = new HTable(UTIL.getConfiguration(), fam2.getTable());
-    //
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      // the whole table, all the keys
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col3));
-
-    // check the value1 should be present at the earliest timestamp
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts1, value1, value2);
-
-    // and value1 should be removed at ts2 (even though it came later)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts1,
-      ts2 + 1, value1, value2); // timestamp + 1 since the end timestamp is exclusive
-
-    // late added column should be there just fine at its timestamp
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-
-    // and check that the late entry also removes itself at the next timestamp up
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-      value2, value3);
-
-    // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-    // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-
-    // check the third entry
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts3, value3);
-
-    // check the fourth entry
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value4, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts4, value4);
-
-    // check the first entry at ts4
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col3));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts2, value2);
-    // verify that we remove the entry, even though it's too far 'back in time'
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index, Collections.<KeyValue> emptyList(), ts3,
-      value4);
-
-    // cleanup
-    closeAndCleanupTables(primary, index);
-  }
-
-  /**
-   * Similar to {@link #testExceedVersionsOutOfOrderPut()}, but mingles deletes and puts.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testExceedVersionsOutOfOrderUpdates() throws Exception {
-    HTable primary = createSetupTables(fam1);
-
-    // setup the data to store
-    long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5, ts6 = 6;
-    byte[] value4 = Bytes.toBytes("val4"), value5 = Bytes.toBytes("val5"), value6 =
-        Bytes.toBytes("val6");
-    // values for the other column to index
-    byte[] v1_1 = ArrayUtils.addAll(value1, Bytes.toBytes("_otherCol")), v3_1 =
-        ArrayUtils.addAll(value3, Bytes.toBytes("_otherCol")), v5_1 =
-        ArrayUtils.addAll(value5, Bytes.toBytes("_otherCol")), v6_1 =
-        ArrayUtils.addAll(value6, Bytes.toBytes("_otherCol"));
-
-    // make some puts to the primary table
-    Put p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts1, value1);
-    p.add(FAM2, indexed_qualifer, ts1, v1_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts3, value3);
-    p.add(FAM2, indexed_qualifer, ts3, v3_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts5, value5);
-    p.add(FAM2, indexed_qualifer, ts5, v5_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts6, value6);
-    p.add(FAM2, indexed_qualifer, ts6, v6_1);
-    primary.put(p);
-    primary.flushCommits();
-
-    /*
-     * now we have definitely exceeded the number of versions visible to a usual client of the
-     * primary table, so we should try doing a put 'back in time' and make sure that it has the correct
-     * index values and cleanup
-     */
-    p = new Put(row1);
-    p.add(FAM, indexed_qualifer, ts2, value2);
-    primary.put(p);
-    primary.flushCommits();
-
-    // read the index for the expected values
-    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
-
-    // do a raw scan of everything in the table
-    if (LOG.isDebugEnabled()) {
-      Scan s = new Scan();
-      s.setRaw(true);
-      ResultScanner scanner = index1.getScanner(s);
-      for (Result r : scanner) {
-        LOG.debug("Found row:" + r);
-      }
-      scanner.close();
-    }
-
-    // build the expected kvs
-    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
-    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-
-    // check the value1 should be present at the earliest timestamp
-    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1, value2);
-
-    // and value1 should be removed at ts2 (even though it came later)
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts1,
-      ts2 + 1, value1, value2); // timestamp + 1 since the end timestamp is exclusive
-
-    // late added column should be there just fine at its timestamp
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value2);
-
-    // and check that the late entry also removes itself at the next timestamp up
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts3,
-      value2, value3);
-
-    // -----------------------------------------------
-    // Check Delete intermingled
-    // -----------------------------------------------
-
-    // verify that the old row is there
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v3_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    // scan from the start key forward (should only include [value3][v3_1])
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, expected.get(0).getKey(),
-      value4);
-
-    // then do a delete of just one of the indexed columns. This should insert a delete for just
-    // the single value, plus a put (and a corresponding delete) back in time for the new value
-    Delete d = new Delete(row1);
-    d.deleteColumn(FAM2, indexed_qualifer, ts3);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][v1_1] since that is the next entry
-    // back in time from the current one
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    // it should be re-written at 3
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-    // but we shouldn't find it at ts5 since it should be covered again
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue> emptyList(), ts5,
-      value3, value4);
-
-    // now remove all the older columns in FAM2 at 4
-    d = new Delete(row1);
-    d.deleteColumns(FAM2, indexed_qualifer, ts4);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-    // all the entries for that column
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts4, value3, value4);
-
-    // same as above, but now do it at ts3 (one timestamp earlier)
-    d = new Delete(row1);
-    d.deleteColumns(FAM2, indexed_qualifer, ts3);
-    primary.delete(d);
-
-    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
-    // all the entries for that column
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);
-
-    // -----------------------------------------------
-    // then we should have the rest of the inserts at their appropriate timestamps. Everything else
-    // should be exactly the same, except we shouldn't see ts0 anymore at ts2
-    // -----------------------------------------------
-
-    // check the entry at 5
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value5, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v5_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts5, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts5, value5);
-
-    // check the entry at 6
-    pairs.clear();
-    pairs.add(new Pair<byte[], CoveredColumn>(value6, col1));
-    pairs.add(new Pair<byte[], CoveredColumn>(v6_1, col2));
-    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts6, pairs);
-    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts6, value5);
-
-    // cleanup
-    closeAndCleanupTables(primary, index1);
-  }
-
-  /**
-   * Create the primary table (to which you should write), set up properly for indexing the given
-   * {@link ColumnGroup}s. Also creates the necessary index tables to match the passed groups.
-   * @param groups {@link ColumnGroup}s to index, creating one index table per column group.
-   * @return reference to the primary table
-   * @throws IOException if there is an issue communicating with HBase
-   */
-  private HTable createSetupTables(ColumnGroup... groups) throws IOException {
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    // setup the index
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    for (ColumnGroup group : groups) {
-      builder.addIndexGroup(group);
-      // create the index tables
-      CoveredColumnIndexer.createIndexTable(admin, group.getTable());
-    }
-
-    // setup the primary table
-    String indexedTableName = Bytes.toString(TestTable.getTableName());
-    HTableDescriptor pTable = new HTableDescriptor(indexedTableName);
-    pTable.addFamily(new HColumnDescriptor(FAM));
-    pTable.addFamily(new HColumnDescriptor(FAM2));
-    builder.build(pTable);
-
-    // create the primary table
-    admin.createTable(pTable);
-    HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName);
-    primary.setAutoFlush(false);
-    return primary;
-  }
-
-  private void closeAndCleanupTables(HTable... tables) throws IOException {
-    if (tables == null) {
-      return;
-    }
-
-    for (HTable table : tables) {
-      table.close();
-      UTIL.deleteTable(table.getTableName());
-    }
-  }
-}
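
IndexTestingUtils.verifyIndexTableAtTimestamp, used throughout the deleted test above, is not part
of this hunk. Assuming it amounts to a bounded, time-ranged scan of the index table compared
against the expected KeyValues, a rough sketch against the 0.94-era HBase client API used here
might look like the following; the method name and exact comparison semantics are assumptions.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class IndexVerificationSketch {

  // Scan the index table over [startRow, endRow) at timestamps up to and including ts,
  // then compare what we find against the expected KeyValues in order.
  static boolean indexMatchesAtTimestamp(HTable index, List<KeyValue> expected, long ts,
      byte[] startRow, byte[] endRow) throws IOException {
    Scan s = new Scan(startRow, endRow);
    s.setTimeRange(0, ts + 1); // the max timestamp is exclusive, so bump it by one
    List<KeyValue> found = new ArrayList<KeyValue>();
    ResultScanner scanner = index.getScanner(s);
    try {
      for (Result r : scanner) {
        found.addAll(Arrays.asList(r.raw()));
      }
    } finally {
      scanner.close();
    }
    if (found.size() != expected.size()) {
      return false;
    }
    for (int i = 0; i < expected.size(); i++) {
      if (KeyValue.COMPARATOR.compare(expected.get(i), found.get(i)) != 0) {
        return false;
      }
    }
    return true;
  }
}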

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
deleted file mode 100644
index 570c1d9..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestEndtoEndIndexingWithCompression.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
-import org.junit.BeforeClass;
-
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.Indexer;
-
-/**
- * Test secondary indexing from an end-to-end perspective (client to server to index table).
- */
-public class TestEndtoEndIndexingWithCompression extends TestEndToEndCoveredIndexing {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    //add our codec and enable WAL compression
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    // disable version checking, so we can test against whatever version of HBase happens to be
-    // installed (right now, it's generally going to be SNAPSHOT versions).
-    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
-    conf.set(WALEditCodec.WAL_EDIT_CODEC_CLASS_KEY,
-    IndexedWALEditCodec.class.getName());
-    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
-    
-    //start the mini-cluster
-    UTIL.startMiniCluster();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
deleted file mode 100644
index 099755a..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/example/TestFailWithoutRetries.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.example;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.IndexTestingUtils;
-import org.apache.hadoop.hbase.index.TableName;
-import org.apache.hadoop.hbase.index.covered.IndexUpdate;
-import org.apache.hadoop.hbase.index.covered.TableState;
-import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
-import org.apache.phoenix.index.BaseIndexCodec;
-
-/**
- * If {@link DoNotRetryIOException} is not subclassed correctly (with the {@link String}
- * constructor), {@link MultiResponse#readFields(java.io.DataInput)} will not correctly deserialize
- * the exception and will just return <tt>null</tt> to the client, which then simply retries.
- */
-public class TestFailWithoutRetries {
-
-  private static final Log LOG = LogFactory.getLog(TestFailWithoutRetries.class);
-  @Rule
-  public TableName table = new TableName();
-
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private String getIndexTableName() {
-    return Bytes.toString(table.getTableName()) + "_index";
-  }
-
-  public static class FailingTestCodec extends BaseIndexCodec {
-
-    @Override
-    public Iterable<IndexUpdate> getIndexDeletes(TableState state) throws IOException {
-      throw new RuntimeException("Intentionally failing deletes for "
-          + TestFailWithoutRetries.class.getName());
-    }
-
-    @Override
-    public Iterable<IndexUpdate> getIndexUpserts(TableState state) throws IOException {
-      throw new RuntimeException("Intentionally failing upserts for "
-          + TestFailWithoutRetries.class.getName());
-    }
-
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    // setup and verify the config
-    Configuration conf = UTIL.getConfiguration();
-    IndexTestingUtils.setupConfig(conf);
-    IndexManagementUtil.ensureMutableIndexingCorrectlyConfigured(conf);
-    // start the cluster
-    UTIL.startMiniCluster();
-  }
-
-  @AfterClass
-  public static void teardownCluster() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  /**
-   * If this test times out, then we didn't fail quickly enough; {@link Indexer} may not be
-   * rethrowing the exception correctly.
-   * <p>
-   * We use a custom codec to enforce the thrown exception.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testQuickFailure() throws Exception {
-    // incorrectly set up indexing for the primary table - the target index table doesn't exist,
-    // so the failure should quickly be returned to the client
-    byte[] family = Bytes.toBytes("family");
-    ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
-    // values are [col1]
-    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
-    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
-    // add the index family
-    builder.addIndexGroup(fam1);
-    // usually, we would create the index table here, but we don't for the sake of the test.
-
-    // setup the primary table
-    String primaryTable = Bytes.toString(table.getTableName());
-    HTableDescriptor pTable = new HTableDescriptor(primaryTable);
-    pTable.addFamily(new HColumnDescriptor(family));
-    // override the codec so we can use our test one
-    builder.build(pTable, FailingTestCodec.class);
-
-    // create the primary table
-    HBaseAdmin admin = UTIL.getHBaseAdmin();
-    admin.createTable(pTable);
-    Configuration conf = new Configuration(UTIL.getConfiguration());
-    // up the number of retries/wait time to make it obvious that we are failing with retries here
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
-    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
-    HTable primary = new HTable(conf, primaryTable);
-    primary.setAutoFlush(false, true);
-
-    // do a simple put that should be indexed
-    Put p = new Put(Bytes.toBytes("row"));
-    p.add(family, null, Bytes.toBytes("value"));
-    primary.put(p);
-    try {
-      primary.flushCommits();
-      fail("Shouldn't have gotten a successful write to the primary table");
-    } catch (RetriesExhaustedWithDetailsException e) {
-      LOG.info("Correclty got a failure of the put!");
-    }
-    primary.close();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
deleted file mode 100644
index a84477a..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
-/**
- * Test the filter to ensure that it correctly handles KVs of different types
- */
-public class TestApplyAndFilterDeletesFilter {
-
-  private static final Set<ImmutableBytesPtr> EMPTY_SET = Collections
-      .<ImmutableBytesPtr> emptySet();
-  private byte[] row = Bytes.toBytes("row");
-  private byte[] family = Bytes.toBytes("family");
-  private byte[] qualifier = Bytes.toBytes("qualifier");
-  private byte[] value = Bytes.toBytes("value");
-  private long ts = 10;
-
-  @Test
-  public void testDeletesAreNotReturned() {
-    KeyValue kv = createKvForType(Type.Delete);
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    assertEquals("Didn't skip point delete!", ReturnCode.SKIP, filter.filterKeyValue(kv));
-
-    filter.reset();
-    kv = createKvForType(Type.DeleteColumn);
-    assertEquals("Didn't skip from column delete!", ReturnCode.SKIP, filter.filterKeyValue(kv));
-
-    filter.reset();
-    kv = createKvForType(Type.DeleteFamily);
-    assertEquals("Didn't skip from family delete!", ReturnCode.SKIP, filter.filterKeyValue(kv));
-  }
-
-  /**
-   * Hinting with this filter is a little convoluted, as we binary-search the list of families to
-   * find the right one to seek to.
-   */
-  @Test
-  public void testHintCorrectlyToNextFamily() {
-    // start with doing a family delete, so we will seek to the next column
-    KeyValue kv = createKvForType(Type.DeleteFamily);
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
-    KeyValue next = createKvForType(Type.Put);
-    // make sure the hint is our attempt at the end key, because we have no more families to seek
-    assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
-      filter.filterKeyValue(next));
-    assertEquals("Didn't get END_KEY with no families to match", KeyValue.LOWESTKEY,
-      filter.getNextKeyHint(next));
-
-    // check for a family that comes before our family, so we always seek to the end as well
-    filter = new ApplyAndFilterDeletesFilter(asSet(Bytes.toBytes("afamily")));
-    assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
-    // make sure the hint is our attempt at the end key, because we have no more families to seek
-    assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
-      filter.filterKeyValue(next));
-    assertEquals("Didn't get END_KEY with no families to match", KeyValue.LOWESTKEY,
-      filter.getNextKeyHint(next));
-
-    // check that we seek to the correct family that comes after our family
-    byte[] laterFamily = Bytes.toBytes("zfamily");
-    filter = new ApplyAndFilterDeletesFilter(asSet(laterFamily));
-    assertEquals(ReturnCode.SKIP, filter.filterKeyValue(kv));
-    KeyValue expected = KeyValue.createFirstOnRow(kv.getRow(), laterFamily, new byte[0]);
-    assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT,
-      filter.filterKeyValue(next));
-    assertEquals("Didn't get correct next key with a next family", expected,
-      filter.getNextKeyHint(next));
-  }
-
-  /**
-   * Point deletes should only cover the exact entry they are tied to. Earlier puts should always
-   * show up.
-   */
-  @Test
-  public void testCoveringPointDelete() {
-    // start with a point delete, which should only cover the matching entry
-    KeyValue kv = createKvForType(Type.Delete);
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    filter.filterKeyValue(kv);
-    KeyValue put = createKvForType(Type.Put);
-    assertEquals("Didn't filter out put with same timestamp!", ReturnCode.SKIP,
-      filter.filterKeyValue(put));
-    // we should filter out the exact same put again, which could occur with the kvs all kept in the
-    // same memstore
-    assertEquals("Didn't filter out put with same timestamp on second call!", ReturnCode.SKIP,
-      filter.filterKeyValue(put));
-
-    // ensure then that we don't filter out a put with an earlier timestamp (though everything else
-    // matches)
-    put = createKvForType(Type.Put, ts - 1);
-    assertEquals("Didn't accept put that has an earlier ts than the covering delete!",
-      ReturnCode.INCLUDE, filter.filterKeyValue(put));
-  }
-
-  private KeyValue createKvForType(Type t) {
-    return createKvForType(t, this.ts);
-  }
-
-  private KeyValue createKvForType(Type t, long timestamp) {
-    return new KeyValue(row, family, qualifier, 0, qualifier.length, timestamp, t, value, 0,
-        value.length);
-  }
-
-  /**
-   * Test that when we do a column delete at a given timestamp, we delete the entire column.
-   * @throws Exception
-   */
-  @Test
-  public void testCoverForDeleteColumn() throws Exception {
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    KeyValue dc = createKvForType(Type.DeleteColumn, 11);
-    KeyValue put = createKvForType(Type.Put, 10);
-    assertEquals("Didn't filter out delete column.", ReturnCode.SKIP, filter.filterKeyValue(dc));
-    assertEquals("Didn't get a seek hint for the deleted column", ReturnCode.SEEK_NEXT_USING_HINT,
-      filter.filterKeyValue(put));
-    // seek past the given put
-    KeyValue seek = filter.getNextKeyHint(put);
-    assertTrue("Seeked key wasn't past the expected put - didn't skip the column",
-      KeyValue.COMPARATOR.compare(seek, put) > 0);
-  }
-
-  /**
-   * DeleteFamily markers should delete everything from that timestamp backwards, but not hide
-   * anything forwards
-   */
-  @Test
-  public void testDeleteFamilyCorrectlyCoversColumns() {
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    KeyValue df = createKvForType(Type.DeleteFamily, 11);
-    KeyValue put = createKvForType(Type.Put, 12);
-
-    assertEquals("Didn't filter out delete family", ReturnCode.SKIP, filter.filterKeyValue(df));
-    assertEquals("Filtered out put with newer TS than delete family", ReturnCode.INCLUDE,
-      filter.filterKeyValue(put));
-
-    // older kv shouldn't be visible
-    put = createKvForType(Type.Put, 10);
-    assertEquals("Didn't filter out older put, covered by DeleteFamily marker",
-      ReturnCode.SEEK_NEXT_USING_HINT, filter.filterKeyValue(put));
-
-    // next seek should be past the families
-    assertEquals(KeyValue.LOWESTKEY, filter.getNextKeyHint(put));
-  }
-
-  /**
-   * Test that we don't cover other columns when we have a delete column.
-   */
-  @Test
-  public void testDeleteColumnCorrectlyCoversColumns() {
-    ApplyAndFilterDeletesFilter filter = new ApplyAndFilterDeletesFilter(EMPTY_SET);
-    KeyValue d = createKvForType(Type.DeleteColumn, 12);
-    byte[] qual2 = Bytes.add(qualifier, Bytes.toBytes("-other"));
-    KeyValue put =
-        new KeyValue(row, family, qual2, 0, qual2.length, 11, Type.Put, value, 0,
-            value.length);
-
-    assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
-    // different column put should still be visible
-    assertEquals("Filtered out put with different column than the delete", ReturnCode.INCLUDE,
-      filter.filterKeyValue(put));
-
-    // set a delete family, but in the past
-    d = createKvForType(Type.DeleteFamily, 10);
-    assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
-    // add back in the original delete column
-    d = createKvForType(Type.DeleteColumn, 11);
-    assertEquals("Didn't filter out delete column", ReturnCode.SKIP, filter.filterKeyValue(d));
-    // onto a different family, so that must be visible too
-    assertEquals("Filtered out put with different column than the delete", ReturnCode.INCLUDE,
-      filter.filterKeyValue(put));
-  }
-
-  private static Set<ImmutableBytesPtr> asSet(byte[]... strings) {
-    Set<ImmutableBytesPtr> set = new HashSet<ImmutableBytesPtr>();
-    for (byte[] s : strings) {
-      set.add(new ImmutableBytesPtr(s));
-    }
-    return set;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 4412d75..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-/**
- * Test that the family-only filter allows only a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestNewerTimestampFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestNewerTimestampFilter.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestNewerTimestampFilter.java
deleted file mode 100644
index 6e61733..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/filter/TestNewerTimestampFilter.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-public class TestNewerTimestampFilter {
-  byte[] row = new byte[] { 'a' };
-  byte[] fam = Bytes.toBytes("family");
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testOnlyAllowsOlderTimestamps() {
-    long ts = 100;
-    NewerTimestampFilter filter = new NewerTimestampFilter(ts);
-
-    KeyValue kv = new KeyValue(row, fam, qual, ts, val);
-    assertEquals("Didn't accept kv with matching ts", ReturnCode.INCLUDE, filter.filterKeyValue(kv));
-
-    kv = new KeyValue(row, fam, qual, ts + 1, val);
-    assertEquals("Didn't skip kv with greater ts", ReturnCode.SKIP, filter.filterKeyValue(kv));
-
-    kv = new KeyValue(row, fam, qual, ts - 1, val);
-    assertEquals("Didn't accept kv with lower ts", ReturnCode.INCLUDE, filter.filterKeyValue(kv));
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/update/TestIndexUpdateManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/update/TestIndexUpdateManager.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/update/TestIndexUpdateManager.java
deleted file mode 100644
index 633ffd5..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/covered/update/TestIndexUpdateManager.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered.update;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.covered.update.IndexUpdateManager;
-
-public class TestIndexUpdateManager {
-
-  private static final byte[] row = Bytes.toBytes("row");
-  private static final String TABLE_NAME = "table";
-  private static final byte[] table = Bytes.toBytes(TABLE_NAME);
-
-  @Test
-  public void testMutationComparator() throws Exception {
-    IndexUpdateManager manager = new IndexUpdateManager();
-    Comparator<Mutation> comparator = manager.COMPARATOR;
-    Put p = new Put(row, 10);
-    // lexicographically earlier should sort earlier
-    Put p1 = new Put(Bytes.toBytes("ro"), 10);
-    assertTrue("lexicographically later sorting first, should be earlier first.",
-      comparator.compare(p, p1) > 0);
-    p1 = new Put(Bytes.toBytes("row1"), 10);
-    assertTrue("lexicographically later sorting first, should be earlier first.",
-      comparator.compare(p1, p) > 0);
-
-    // larger ts sorts before smaller, for the same row
-    p1 = new Put(row, 11);
-    assertTrue("Smaller timestamp sorting first, should be larger first.",
-      comparator.compare(p, p1) > 0);
-    // still true, even for deletes
-    Delete d = new Delete(row, 11);
-    assertTrue("Smaller timestamp sorting first, should be larger first.",
-      comparator.compare(p, d) > 0);
-
-    // for the same row and ts, the delete should sort earlier
-    d = new Delete(row, 10);
-    assertTrue("Delete doesn't sort before put, for the same row and ts",
-      comparator.compare(p, d) > 0);
-
-    // but for different rows, we still respect the row sorting.
-    d = new Delete(Bytes.toBytes("row1"), 10);
-    assertTrue("Delete doesn't sort before put, for the same row and ts",
-      comparator.compare(p, d) < 0);
-  }
-
-  /**
-   * When making updates, we need to cancel out {@link Delete}s and {@link Put}s for the same row.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testCancelingUpdates() throws Exception {
-    IndexUpdateManager manager = new IndexUpdateManager();
-
-    long ts1 = 10, ts2 = 11;
-    // at different timestamps, so both should be retained
-    Delete d = new Delete(row, ts1);
-    Put p = new Put(row, ts2);
-    manager.addIndexUpdate(table, d);
-    manager.addIndexUpdate(table, p);
-    List<Mutation> pending = new ArrayList<Mutation>();
-    pending.add(p);
-    pending.add(d);
-    validate(manager, pending);
-
-    // add a delete that should cancel out the put, leading to only one delete remaining
-    Delete d2 = new Delete(row, ts2);
-    manager.addIndexUpdate(table, d2);
-    pending.add(d);
-    validate(manager, pending);
-
-    // double-deletes of the same row only retain the existing one, which was already canceled out
-    // above
-    Delete d3 = new Delete(row, ts2);
-    manager.addIndexUpdate(table, d3);
-    pending.add(d);
-    validate(manager, pending);
-
-    // if there is just a put and a delete at the same ts, no pending updates should be returned
-    manager = new IndexUpdateManager();
-    manager.addIndexUpdate(table, d2);
-    manager.addIndexUpdate(table, p);
-    validate(manager, Collections.<Mutation> emptyList());
-
-    // different row insertions can be tricky too, if you don't get the base cases right
-    manager = new IndexUpdateManager();
-    manager.addIndexUpdate(table, p);
-    // this row definitely sorts after the current row
-    byte[] row1 = Bytes.toBytes("row1");
-    Put p1 = new Put(row1, ts1);
-    manager.addIndexUpdate(table, p1);
-    // this delete should completely cover the given put and both should be removed
-    Delete d4 = new Delete(row1, ts1);
-    manager.addIndexUpdate(table, d4);
-    pending.clear();
-    pending.add(p);
-    validate(manager, pending);
-  }
-
-  private void validate(IndexUpdateManager manager, List<Mutation> pending) {
-    for (Pair<Mutation, byte[]> entry : manager.toMap()) {
-      assertEquals("Table name didn't match for stored entry!", table, entry.getSecond());
-      Mutation m = pending.remove(0);
-      // test with == to match the exact entries, Mutation.equals just checks the row
-      assertTrue(
-        "Didn't get the expected mutation! Expected: " + m + ", but got: " + entry.getFirst(),
-        m == entry.getFirst());
-    }
-    assertTrue("Missing pending updates: " + pending, pending.isEmpty());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/parallel/TestThreadPoolBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/parallel/TestThreadPoolBuilder.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/parallel/TestThreadPoolBuilder.java
deleted file mode 100644
index ce1a539..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/index/parallel/TestThreadPoolBuilder.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.parallel;
-
-import static org.junit.Assert.*;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Rule;
-import org.junit.Test;
-
-import org.apache.hadoop.hbase.index.TableName;
-
-public class TestThreadPoolBuilder {
-
-  @Rule
-  public TableName name = new TableName();
-
-  @Test
-  public void testCoreThreadTimeoutNonZero() {
-    Configuration conf = new Configuration(false);
-    String key = name.getTableNameString()+"-key";
-    ThreadPoolBuilder builder = new ThreadPoolBuilder(name.getTableNameString(), conf);
-    assertTrue("core threads not set, but failed return", builder.getKeepAliveTime() > 0);
-    // set a negative value
-    builder.setCoreTimeout(key, -1);
-    assertTrue("core threads not set, but failed return", builder.getKeepAliveTime() > 0);
-    // set a positive value
-    builder.setCoreTimeout(key, 1234);
-    assertEquals("core threads not set, but failed return", 1234, builder.getKeepAliveTime());
-    // set an empty value
-    builder.setCoreTimeout(key);
-    assertTrue("core threads not set, but failed return", builder.getKeepAliveTime() > 0);
-  }
-  
-  @Test
-  public void testMaxThreadsNonZero() {
-    Configuration conf = new Configuration(false);
-    String key = name.getTableNameString()+"-key";
-    ThreadPoolBuilder builder = new ThreadPoolBuilder(name.getTableNameString(), conf);
-    assertTrue("core threads not set, but failed return", builder.getMaxThreads() > 0);
-    // set an negative value
-    builder.setMaxThread(key, -1);
-    assertTrue("core threads not set, but failed return", builder.getMaxThreads() > 0);
-    // set a positive value
-    builder.setMaxThread(key, 1234);
-    assertEquals("core threads not set, but failed return", 1234, builder.getMaxThreads());
-  }
-}
\ No newline at end of file