Posted to commits@phoenix.apache.org by mu...@apache.org on 2014/02/15 01:07:48 UTC

[15/15] git commit: Rename package from org.apache.hadoop.hbase.index.* to org.apache.phoenix.index.* to fix classloader issue causing mutable index performance regression - https://issues.apache.org/jira/browse/PHOENIX-38

Rename package from org.apache.hadoop.hbase.index.* to org.apache.phoenix.index.* to fix classloader issue causing mutable index performance regression - https://issues.apache.org/jira/browse/PHOENIX-38
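
Background (as described in PHOENIX-38): classes under org.apache.hadoop.hbase.* appear to be delegated to the parent classloader by HBase's coprocessor classloader, bypassing the per-table coprocessor jar; moving the index classes under org.apache.phoenix.* avoids that exemption. For downstream code the rename reduces to an import change, e.g.:

    -import org.apache.hadoop.hbase.index.Indexer;
    +import org.apache.phoenix.hbase.index.Indexer;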


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/bbacf6e0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/bbacf6e0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/bbacf6e0

Branch: refs/heads/master
Commit: bbacf6e03045bab692aecbea00a4b63ee7593e73
Parents: eebeed4
Author: Mujtaba <mu...@apache.org>
Authored: Fri Feb 14 16:04:38 2014 -0800
Committer: Mujtaba <mu...@apache.org>
Committed: Fri Feb 14 16:04:38 2014 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/index/CapturingAbortable.java  |  66 --
 .../hbase/index/IndexLogRollSynchronizer.java   | 123 ---
 .../org/apache/hadoop/hbase/index/Indexer.java  | 704 ---------------
 .../apache/hadoop/hbase/index/ValueGetter.java  |  36 -
 .../hbase/index/builder/BaseIndexBuilder.java   |  97 --
 .../hbase/index/builder/IndexBuildManager.java  | 214 -----
 .../hbase/index/builder/IndexBuilder.java       | 142 ---
 .../builder/IndexBuildingFailureException.java  |  48 -
 .../hadoop/hbase/index/covered/Batch.java       |  60 --
 .../hbase/index/covered/CoveredColumns.java     |  49 --
 .../covered/CoveredColumnsIndexBuilder.java     | 488 ----------
 .../hadoop/hbase/index/covered/IndexCodec.java  | 110 ---
 .../hadoop/hbase/index/covered/IndexUpdate.java |  77 --
 .../hbase/index/covered/KeyValueStore.java      |  33 -
 .../hbase/index/covered/LocalTableState.java    | 242 -----
 .../hadoop/hbase/index/covered/TableState.java  | 116 ---
 .../hbase/index/covered/data/IndexMemStore.java | 331 -------
 .../index/covered/data/LazyValueGetter.java     |  87 --
 .../index/covered/data/LocalHBaseState.java     |  47 -
 .../hbase/index/covered/data/LocalTable.java    |  72 --
 .../index/covered/example/ColumnGroup.java      | 112 ---
 .../index/covered/example/CoveredColumn.java    | 107 ---
 .../example/CoveredColumnIndexCodec.java        | 367 --------
 .../CoveredColumnIndexSpecifierBuilder.java     | 184 ----
 .../covered/example/CoveredColumnIndexer.java   | 164 ----
 .../filter/ApplyAndFilterDeletesFilter.java     | 308 -------
 ...olumnTrackingNextLargestTimestampFilter.java |  72 --
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/MaxTimestampFilter.java      |  74 --
 .../covered/filter/NewerTimestampFilter.java    |  55 --
 .../index/covered/update/ColumnReference.java   | 166 ----
 .../index/covered/update/ColumnTracker.java     | 115 ---
 .../covered/update/IndexUpdateManager.java      | 238 -----
 .../covered/update/IndexedColumnGroup.java      |  28 -
 .../index/covered/update/SortedCollection.java  | 128 ---
 .../index/exception/IndexWriteException.java    |  43 -
 .../MultiIndexWriteFailureException.java        |  44 -
 .../SingleIndexWriteFailureException.java       |  61 --
 .../hbase/index/parallel/BaseTaskRunner.java    | 129 ---
 .../hbase/index/parallel/EarlyExitFailure.java  |  34 -
 .../index/parallel/QuickFailingTaskRunner.java  |  49 --
 .../hadoop/hbase/index/parallel/Task.java       |  40 -
 .../hadoop/hbase/index/parallel/TaskBatch.java  |  74 --
 .../hadoop/hbase/index/parallel/TaskRunner.java |  60 --
 .../hbase/index/parallel/ThreadPoolBuilder.java |  90 --
 .../hbase/index/parallel/ThreadPoolManager.java | 146 ---
 .../parallel/WaitForCompletionTaskRunner.java   |  51 --
 .../hbase/index/scanner/EmptyScanner.java       |  50 --
 .../index/scanner/FilteredKeyValueScanner.java  | 162 ----
 .../hadoop/hbase/index/scanner/Scanner.java     |  55 --
 .../hbase/index/scanner/ScannerBuilder.java     | 165 ----
 .../hbase/index/table/CachingHTableFactory.java | 110 ---
 .../index/table/CoprocessorHTableFactory.java   |  68 --
 .../hadoop/hbase/index/table/HTableFactory.java |  32 -
 .../index/table/HTableInterfaceReference.java   |  64 --
 .../hbase/index/util/ImmutableBytesPtr.java     | 111 ---
 .../hbase/index/util/IndexManagementUtil.java   | 244 -----
 .../hadoop/hbase/index/wal/IndexedKeyValue.java | 173 ----
 .../hadoop/hbase/index/wal/KeyValueCodec.java   |  97 --
 .../hbase/index/write/IndexCommitter.java       |  37 -
 .../hbase/index/write/IndexFailurePolicy.java   |  44 -
 .../hadoop/hbase/index/write/IndexWriter.java   | 224 -----
 .../hbase/index/write/IndexWriterUtils.java     |  72 --
 .../index/write/KillServerOnFailurePolicy.java  |  80 --
 .../write/ParallelWriterIndexCommitter.java     | 210 -----
 .../recovery/PerRegionIndexWriteCache.java      |  63 --
 .../recovery/StoreFailuresInCachePolicy.java    |  84 --
 .../TrackingParallelWriterIndexCommitter.java   | 226 -----
 .../hbase/regionserver/wal/IndexedWALEdit.java  |   2 +-
 .../regionserver/wal/IndexedWALEditCodec.java   |   4 +-
 .../org/apache/phoenix/cache/GlobalCache.java   |   2 +-
 .../org/apache/phoenix/cache/HashCache.java     |   2 +-
 .../org/apache/phoenix/cache/TenantCache.java   |   2 +-
 .../apache/phoenix/cache/TenantCacheImpl.java   |   2 +-
 .../phoenix/cache/aggcache/SpillManager.java    |   2 +-
 .../apache/phoenix/cache/aggcache/SpillMap.java |   3 +-
 .../cache/aggcache/SpillableGroupByCache.java   |   2 +-
 .../phoenix/client/GenericKeyValueBuilder.java  |   2 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   2 +-
 .../apache/phoenix/compile/QueryCompiler.java   |   2 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   2 +-
 .../GroupedAggregateRegionObserver.java         |   2 +-
 .../coprocessor/HashJoinRegionScanner.java      |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java       |   4 +-
 .../coprocessor/ServerCachingEndpointImpl.java  |   2 +-
 .../phoenix/exception/SQLExceptionCode.java     |   2 +-
 .../apache/phoenix/execute/HashJoinPlan.java    |   2 +-
 .../apache/phoenix/execute/MutationState.java   |   2 +-
 .../phoenix/expression/InListExpression.java    |   2 +-
 .../aggregator/BaseDecimalStddevAggregator.java |   2 +-
 .../aggregator/BaseStddevAggregator.java        |   2 +-
 .../DistinctValueWithCountClientAggregator.java |   2 +-
 .../DistinctValueWithCountServerAggregator.java |   2 +-
 .../filter/MultiCQKeyValueComparisonFilter.java |   2 +-
 .../phoenix/hbase/index/CapturingAbortable.java |  66 ++
 .../hbase/index/IndexLogRollSynchronizer.java   | 123 +++
 .../org/apache/phoenix/hbase/index/Indexer.java | 704 +++++++++++++++
 .../apache/phoenix/hbase/index/ValueGetter.java |  36 +
 .../hbase/index/builder/BaseIndexBuilder.java   |  97 ++
 .../hbase/index/builder/IndexBuildManager.java  | 214 +++++
 .../hbase/index/builder/IndexBuilder.java       | 142 +++
 .../builder/IndexBuildingFailureException.java  |  48 +
 .../phoenix/hbase/index/covered/Batch.java      |  60 ++
 .../hbase/index/covered/CoveredColumns.java     |  49 ++
 .../covered/CoveredColumnsIndexBuilder.java     | 488 ++++++++++
 .../phoenix/hbase/index/covered/IndexCodec.java | 110 +++
 .../hbase/index/covered/IndexUpdate.java        |  77 ++
 .../hbase/index/covered/KeyValueStore.java      |  33 +
 .../hbase/index/covered/LocalTableState.java    | 242 +++++
 .../phoenix/hbase/index/covered/TableState.java | 116 +++
 .../hbase/index/covered/data/IndexMemStore.java | 331 +++++++
 .../index/covered/data/LazyValueGetter.java     |  87 ++
 .../index/covered/data/LocalHBaseState.java     |  47 +
 .../hbase/index/covered/data/LocalTable.java    |  72 ++
 .../index/covered/example/ColumnGroup.java      | 112 +++
 .../index/covered/example/CoveredColumn.java    | 107 +++
 .../example/CoveredColumnIndexCodec.java        | 367 ++++++++
 .../CoveredColumnIndexSpecifierBuilder.java     | 184 ++++
 .../covered/example/CoveredColumnIndexer.java   | 164 ++++
 .../filter/ApplyAndFilterDeletesFilter.java     | 308 +++++++
 ...olumnTrackingNextLargestTimestampFilter.java |  72 ++
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 ++
 .../covered/filter/MaxTimestampFilter.java      |  74 ++
 .../covered/filter/NewerTimestampFilter.java    |  55 ++
 .../index/covered/update/ColumnReference.java   | 166 ++++
 .../index/covered/update/ColumnTracker.java     | 115 +++
 .../covered/update/IndexUpdateManager.java      | 239 +++++
 .../covered/update/IndexedColumnGroup.java      |  28 +
 .../index/covered/update/SortedCollection.java  | 128 +++
 .../index/exception/IndexWriteException.java    |  43 +
 .../MultiIndexWriteFailureException.java        |  44 +
 .../SingleIndexWriteFailureException.java       |  61 ++
 .../hbase/index/parallel/BaseTaskRunner.java    | 129 +++
 .../hbase/index/parallel/EarlyExitFailure.java  |  34 +
 .../index/parallel/QuickFailingTaskRunner.java  |  49 ++
 .../phoenix/hbase/index/parallel/Task.java      |  40 +
 .../phoenix/hbase/index/parallel/TaskBatch.java |  74 ++
 .../hbase/index/parallel/TaskRunner.java        |  60 ++
 .../hbase/index/parallel/ThreadPoolBuilder.java |  90 ++
 .../hbase/index/parallel/ThreadPoolManager.java | 146 +++
 .../parallel/WaitForCompletionTaskRunner.java   |  51 ++
 .../hbase/index/scanner/EmptyScanner.java       |  50 ++
 .../index/scanner/FilteredKeyValueScanner.java  | 162 ++++
 .../phoenix/hbase/index/scanner/Scanner.java    |  55 ++
 .../hbase/index/scanner/ScannerBuilder.java     | 165 ++++
 .../hbase/index/table/CachingHTableFactory.java | 110 +++
 .../index/table/CoprocessorHTableFactory.java   |  68 ++
 .../hbase/index/table/HTableFactory.java        |  32 +
 .../index/table/HTableInterfaceReference.java   |  64 ++
 .../hbase/index/util/ImmutableBytesPtr.java     | 111 +++
 .../hbase/index/util/IndexManagementUtil.java   | 244 +++++
 .../hbase/index/wal/IndexedKeyValue.java        | 173 ++++
 .../phoenix/hbase/index/wal/KeyValueCodec.java  |  97 ++
 .../hbase/index/write/IndexCommitter.java       |  37 +
 .../hbase/index/write/IndexFailurePolicy.java   |  45 +
 .../phoenix/hbase/index/write/IndexWriter.java  | 224 +++++
 .../hbase/index/write/IndexWriterUtils.java     |  72 ++
 .../index/write/KillServerOnFailurePolicy.java  |  81 ++
 .../write/ParallelWriterIndexCommitter.java     | 210 +++++
 .../recovery/PerRegionIndexWriteCache.java      |  64 ++
 .../recovery/StoreFailuresInCachePolicy.java    |  84 ++
 .../TrackingParallelWriterIndexCommitter.java   | 226 +++++
 .../apache/phoenix/index/BaseIndexCodec.java    |   2 +-
 .../apache/phoenix/index/IndexMaintainer.java   |   6 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |   4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  16 +-
 .../index/PhoenixIndexFailurePolicy.java        |   4 +-
 .../apache/phoenix/join/HashCacheFactory.java   |   2 +-
 .../org/apache/phoenix/join/HashJoinInfo.java   |   2 +-
 .../query/ConnectionQueryServicesImpl.java      |   4 +-
 .../apache/phoenix/query/QueryConstants.java    |   2 +-
 .../java/org/apache/phoenix/schema/PName.java   |   2 +-
 .../org/apache/phoenix/schema/PNameImpl.java    |   2 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |   2 +-
 .../java/org/apache/phoenix/util/ByteUtil.java  |   2 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |   6 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   2 +-
 .../java/org/apache/phoenix/util/TupleUtil.java |   2 +-
 .../hadoop/hbase/index/IndexTestingUtils.java   |  94 --
 .../hadoop/hbase/index/StubAbortable.java       |  41 -
 .../apache/hadoop/hbase/index/TableName.java    |  45 -
 .../TestFailForUnsupportedHBaseVersions.java    | 155 ----
 .../covered/CoveredIndexCodecForTesting.java    |  71 --
 .../hbase/index/covered/TestCoveredColumns.java |  45 -
 .../TestEndToEndCoveredColumnsIndexBuilder.java | 339 -------
 .../index/covered/TestLocalTableState.java      | 196 -----
 .../index/covered/data/TestIndexMemStore.java   |  93 --
 .../covered/example/TestColumnTracker.java      |  61 --
 .../example/TestCoveredColumnIndexCodec.java    | 248 ------
 .../TestCoveredIndexSpecifierBuilder.java       |  72 --
 .../example/TestEndToEndCoveredIndexing.java    | 877 ------------------
 .../TestEndtoEndIndexingWithCompression.java    |  49 --
 .../covered/example/TestFailWithoutRetries.java | 145 ---
 .../filter/TestApplyAndFilterDeletesFilter.java | 210 -----
 .../covered/filter/TestFamilyOnlyFilter.java    | 105 ---
 .../filter/TestNewerTimestampFilter.java        |  47 -
 .../covered/update/TestIndexUpdateManager.java  | 140 ---
 .../index/parallel/TestThreadPoolBuilder.java   |  63 --
 .../index/parallel/TestThreadPoolManager.java   |  93 --
 .../index/util/TestIndexManagementUtil.java     |  66 --
 .../hbase/index/write/FakeTableFactory.java     |  51 --
 .../index/write/TestCachingHTableFactory.java   |  58 --
 .../hbase/index/write/TestIndexWriter.java      | 284 ------
 .../index/write/TestParalleIndexWriter.java     | 117 ---
 .../write/TestParalleWriterIndexCommitter.java  | 117 ---
 .../index/write/TestWALRecoveryCaching.java     | 368 --------
 .../recovery/TestPerRegionIndexWriteCache.java  | 168 ----
 .../wal/TestReadWriteKeyValuesWithCodec.java    |   4 +-
 ...ALReplayWithIndexWritesAndCompressedWAL.java |  12 +-
 ...exWritesAndUncompressedWALInHBase_094_9.java |   2 +-
 .../org/apache/phoenix/end2end/QueryTest.java   |   3 +-
 .../phoenix/hbase/index/IndexTestingUtils.java  |  94 ++
 .../phoenix/hbase/index/StubAbortable.java      |  41 +
 .../apache/phoenix/hbase/index/TableName.java   |  45 +
 .../TestFailForUnsupportedHBaseVersions.java    | 156 ++++
 .../covered/CoveredIndexCodecForTesting.java    |  74 ++
 .../hbase/index/covered/TestCoveredColumns.java |  46 +
 .../TestEndToEndCoveredColumnsIndexBuilder.java | 344 ++++++++
 .../index/covered/TestLocalTableState.java      | 198 +++++
 .../index/covered/data/TestIndexMemStore.java   |  94 ++
 .../covered/example/TestColumnTracker.java      |  61 ++
 .../example/TestCoveredColumnIndexCodec.java    | 251 ++++++
 .../TestCoveredIndexSpecifierBuilder.java       |  75 ++
 .../example/TestEndToEndCoveredIndexing.java    | 882 +++++++++++++++++++
 .../TestEndtoEndIndexingWithCompression.java    |  49 ++
 .../covered/example/TestFailWithoutRetries.java | 148 ++++
 .../filter/TestApplyAndFilterDeletesFilter.java | 211 +++++
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 +++
 .../filter/TestNewerTimestampFilter.java        |  48 +
 .../covered/update/TestIndexUpdateManager.java  | 140 +++
 .../index/parallel/TestThreadPoolBuilder.java   |  64 ++
 .../index/parallel/TestThreadPoolManager.java   |  95 ++
 .../index/util/TestIndexManagementUtil.java     |  67 ++
 .../hbase/index/write/FakeTableFactory.java     |  51 ++
 .../index/write/TestCachingHTableFactory.java   |  58 ++
 .../hbase/index/write/TestIndexWriter.java      | 287 ++++++
 .../index/write/TestParalleIndexWriter.java     | 119 +++
 .../write/TestParalleWriterIndexCommitter.java  | 119 +++
 .../index/write/TestWALRecoveryCaching.java     | 369 ++++++++
 .../recovery/TestPerRegionIndexWriteCache.java  | 168 ++++
 .../phoenix/index/IndexMaintainerTest.java      |   6 +-
 241 files changed, 13069 insertions(+), 13021 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/CapturingAbortable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/CapturingAbortable.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/CapturingAbortable.java
deleted file mode 100644
index f918c90..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/CapturingAbortable.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import org.apache.hadoop.hbase.Abortable;
-
-/**
- * {@link Abortable} that can rethrow the cause of the abort.
- */
-public class CapturingAbortable implements Abortable {
-
-  private Abortable delegate;
-  private Throwable cause;
-  private String why;
-
-  public CapturingAbortable(Abortable delegate) {
-    this.delegate = delegate;
-  }
-
-  @Override
-  public void abort(String why, Throwable e) {
-    if (delegate.isAborted()) {
-      return;
-    }
-    this.why = why;
-    this.cause = e;
-    delegate.abort(why, e);
-
-  }
-
-  @Override
-  public boolean isAborted() {
-    return delegate.isAborted();
-  }
-
-  /**
-   * Throw the cause of the abort, if <tt>this</tt> was aborted. If there was an exception causing
-   * the abort, re-throws that. Otherwise, just throws a generic {@link Exception} with the reason
-   * why the abort was caused.
-   * @throws Throwable the cause of the abort.
-   */
-  public void throwCauseIfAborted() throws Throwable {
-    if (!this.isAborted()) {
-      return;
-    }
-    if (cause == null) {
-      throw new Exception(why);
-    }
-    throw cause;
-  }
-}
\ No newline at end of file
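
A minimal usage sketch of the CapturingAbortable removed above (illustrative only; runParallelIndexWrites is a hypothetical task, and env stands in for a RegionCoprocessorEnvironment):

    CapturingAbortable abortable = new CapturingAbortable(env.getRegionServerServices());
    runParallelIndexWrites(abortable); // hypothetical: workers call abortable.abort(why, cause)
    // once the workers are done, surface any failure on the coordinating thread:
    abortable.throwCauseIfAborted();   // rethrows the recorded cause, or Exception(why)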

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/IndexLogRollSynchronizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/IndexLogRollSynchronizer.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/IndexLogRollSynchronizer.java
deleted file mode 100644
index a4ef58e..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/IndexLogRollSynchronizer.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import java.io.IOException;
-import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-
-/**
- * Ensure that the log isn't rolled while we are in the middle of doing a pending index write.
- * <p>
- * The problem we are trying to solve is the following sequence:
- * <ol>
- * <li>Write to the indexed table</li>
- * <li>Write the index-containing WALEdit</li>
- * <li>Start writing to the index tables in the postXXX hook</li>
- * <li>WAL gets rolled and archived</li>
- * <li>An index update fails, in which case we should kill ourselves to get WAL replay</li>
- * <li>Since the WAL got archived, we won't get the replay of the index writes</li>
- * </ol>
- * <p>
- * The usual course of events should be:
- * <ol>
- * <li>In a preXXX hook,
- * <ol>
- * <li>Build the {@link WALEdit} + index information</li>
- * <li>Lock the {@link IndexLogRollSynchronizer#logArchiveLock}</li>
- * <ul>
- * <li>This is a reentrant readlock on the WAL archiving, so we can make multiple WAL/index updates
- * concurrently</li>
- * </ul>
- * </li>
- * </ol>
- * </li>
- * <li>Pass that {@link WALEdit} to the WAL, ensuring it's durable and replayable</li>
- * <li>In the corresponding postXXX,
- * <ol>
- * <li>make the updates to the index tables</li>
- * <li>Unlock {@link IndexLogRollSynchronizer#logArchiveLock}</li>
- * </ol>
- * </li> </ol>
- * <p>
- * <tt>this</tt> should be added as a {@link WALActionsListener} by registering it with the WAL via its registerWALActionsListener method (see Indexer's start method).
- */
-public class IndexLogRollSynchronizer implements WALActionsListener {
-
-  private static final Log LOG = LogFactory.getLog(IndexLogRollSynchronizer.class);
-  private WriteLock logArchiveLock;
-
-  public IndexLogRollSynchronizer(WriteLock logWriteLock){
-    this.logArchiveLock = logWriteLock;
-  }
-
-
-  @Override
-  public void preLogArchive(Path oldPath, Path newPath) throws IOException {
-    //take a write lock on the index - any pending index updates will complete before we finish
-    LOG.debug("Taking INDEX_UPDATE writelock");
-    logArchiveLock.lock();
-    LOG.debug("Got the INDEX_UPDATE writelock");
-  }
-  
-  @Override
-  public void postLogArchive(Path oldPath, Path newPath) throws IOException {
-    // done archiving the logs, any WAL updates will be replayed on failure
-    LOG.debug("Releasing INDEX_UPDATE writelock");
-    logArchiveLock.unlock();
-  }
-
-  @Override
-  public void logCloseRequested() {
-    // don't care - before this is called, all the HRegions are closed, so we can't get any new
-    // requests and all pending requests can finish before the WAL closes.
-  }
-
-  @Override
-  public void preLogRoll(Path oldPath, Path newPath) throws IOException {
-    // noop
-  }
-
-  @Override
-  public void postLogRoll(Path oldPath, Path newPath) throws IOException {
-    // noop
-  }
-
-  @Override
-  public void logRollRequested() {
-    // noop
-  }
-
-  @Override
-  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit) {
-    // noop
-  }
-
-  @Override
-  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
-    // noop
-  }
-}
\ No newline at end of file
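
A condensed, illustrative sketch of the locking protocol described in the Javadoc above (appendIndexEditsToWAL and writeToIndexTables are hypothetical stand-ins for the Indexer's pre/post hooks):

    // Shared fair lock: the Indexer holds the read side, this listener the write side.
    ReentrantReadWriteLock archiveLock = new ReentrantReadWriteLock(true);

    // Update path (many concurrent callers):
    archiveLock.readLock().lock();
    try {
        appendIndexEditsToWAL(edit);  // hypothetical: make the index updates durable in the WAL
        writeToIndexTables(edit);     // hypothetical: apply the updates to the index tables
    } finally {
        archiveLock.readLock().unlock();
    }

    // Archive path (this listener): preLogArchive() takes archiveLock.writeLock() and
    // postLogArchive() releases it, so a WAL is never archived mid-index-write.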

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/Indexer.java
deleted file mode 100644
index ebe685d..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/Indexer.java
+++ /dev/null
@@ -1,704 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import static org.apache.hadoop.hbase.index.util.IndexManagementUtil.rethrowIndexingException;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Pair;
-
-import com.google.common.collect.Multimap;
-import org.apache.hadoop.hbase.index.builder.IndexBuildManager;
-import org.apache.hadoop.hbase.index.builder.IndexBuilder;
-import org.apache.hadoop.hbase.index.builder.IndexBuildingFailureException;
-import org.apache.hadoop.hbase.index.table.HTableInterfaceReference;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-import org.apache.hadoop.hbase.index.util.IndexManagementUtil;
-import org.apache.hadoop.hbase.index.wal.IndexedKeyValue;
-import org.apache.hadoop.hbase.index.write.IndexFailurePolicy;
-import org.apache.hadoop.hbase.index.write.IndexWriter;
-import org.apache.hadoop.hbase.index.write.recovery.PerRegionIndexWriteCache;
-import org.apache.hadoop.hbase.index.write.recovery.StoreFailuresInCachePolicy;
-import org.apache.hadoop.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
-import org.apache.phoenix.util.MetaDataUtil;
-
-/**
- * Do all the work of managing index updates from a single coprocessor. All Puts/Deletes are passed
- * to an {@link IndexBuilder} to determine the actual updates to make.
- * <p>
- * If the WAL is enabled, these updates are then added to the WALEdit and attempted to be written to
- * the WAL after the WALEdit has been saved. If any of the index updates fail, this server is
- * immediately terminated and we rely on WAL replay to attempt the index updates again (see
- * {@link #preWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)}).
- * <p>
- * If the WAL is disabled, the updates are attempted immediately. No consistency guarantees are made
- * if the WAL is disabled - some or none of the index updates may be successful. All updates in a
- * single batch must have the same durability level - either everything gets written to the WAL or
- * nothing does. Currently, we do not support mixed-durability updates within a single batch. If you
- * want to have different durability levels, you only need to split the updates into two different
- * batches.
- */
-public class Indexer extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(Indexer.class);
-
-  /** WAL on this server */
-  private HLog log;
-  protected IndexWriter writer;
-  protected IndexBuildManager builder;
-
-  /** Configuration key for the {@link IndexBuilder} to use */
-  public static final String INDEX_BUILDER_CONF_KEY = "index.builder";
-
-  // Set up our locking on the index edits/WAL so we can be sure that we don't roll and archive a
-  // WAL edit before it has been applied to the index tables
-  private static final ReentrantReadWriteLock INDEX_READ_WRITE_LOCK = new ReentrantReadWriteLock(
-      true);
-  public static final ReadLock INDEX_UPDATE_LOCK = INDEX_READ_WRITE_LOCK.readLock();
-
-  /**
-   * Configuration key controlling whether the indexer should check the version of HBase it is
-   * running on. Generally, you only want to disable this check for testing or for custom versions
-   * of HBase.
-   */
-  public static final String CHECK_VERSION_CONF_KEY = "com.saleforce.hbase.index.checkversion";
-
-  private static final String INDEX_RECOVERY_FAILURE_POLICY_KEY = "org.apache.hadoop.hbase.index.recovery.failurepolicy";
-
-  /**
-   * Marker {@link KeyValue} to indicate that we are doing a batch operation. Needed because the
-   * coprocessor framework throws away the WALEdit from the prePut/preDelete hooks when checking a
-   * batch if there were no {@link KeyValue}s attached to the {@link WALEdit}. When you get down to
-   * the preBatch hook, there won't be any WALEdits to which to add the index updates.
-   */
-  private static KeyValue BATCH_MARKER = new KeyValue();
-
-  /**
-   * Cache of the failed updates to the various regions. Used to make the WAL recovery mechanisms
-   * more robust when recovering index regions that were on the same server as the primary
-   * table region.
-   */
-  private PerRegionIndexWriteCache failedIndexEdits = new PerRegionIndexWriteCache();
-
-  /**
-   * IndexWriter for writing the recovered index edits. Separate from the main indexer since we need
-   * different write/failure policies
-   */
-  private IndexWriter recoveryWriter;
-
-  private boolean stopped;
-  private boolean disabled;
-
-  public static final String RecoveryFailurePolicyKeyForTesting = INDEX_RECOVERY_FAILURE_POLICY_KEY;
-
-    public static final int INDEXING_SUPPORTED_MAJOR_VERSION = MetaDataUtil
-            .encodeMaxPatchVersion(0, 94);
-    public static final int INDEXING_SUPPORTED__MIN_MAJOR_VERSION = MetaDataUtil
-            .encodeVersion("0.94.0");
-    private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = MetaDataUtil
-            .encodeVersion("0.94.9");
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-      try {
-        final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-        String serverName = env.getRegionServerServices().getServerName().getServerName();
-        if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
-          // make sure the right version <-> configuration combinations are allowed.
-          String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration());
-          if (errormsg != null) {
-            IOException ioe = new IOException(errormsg);
-            env.getRegionServerServices().abort(errormsg, ioe);
-            throw ioe;
-          }
-        }
-    
-        this.builder = new IndexBuildManager(env);
-    
-        // get a reference to the WAL
-        log = env.getRegionServerServices().getWAL();
-        // add a synchronizer so we don't archive a WAL that we need
-        log.registerWALActionsListener(new IndexLogRollSynchronizer(INDEX_READ_WRITE_LOCK.writeLock()));
-    
-        // setup the actual index writer
-        this.writer = new IndexWriter(env, serverName + "-index-writer");
-    
-        // setup the recovery writer that does retries on the failed edits
-        TrackingParallelWriterIndexCommitter recoveryCommmiter =
-            new TrackingParallelWriterIndexCommitter();
-    
-        try {
-          // get the specified failure policy. We only ever override it in tests, but we need to do it
-          // here
-          Class<? extends IndexFailurePolicy> policyClass =
-              env.getConfiguration().getClass(INDEX_RECOVERY_FAILURE_POLICY_KEY,
-                StoreFailuresInCachePolicy.class, IndexFailurePolicy.class);
-          IndexFailurePolicy policy =
-              policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits);
-          LOG.debug("Setting up recovery writter with committer: " + recoveryCommmiter.getClass()
-              + " and failure policy: " + policy.getClass());
-          recoveryWriter =
-              new IndexWriter(recoveryCommmiter, policy, env, serverName + "-recovery-writer");
-        } catch (Exception ex) {
-          throw new IOException("Could not instantiate recovery failure policy!", ex);
-        }
-      } catch (NoSuchMethodError ex) {
-          disabled = true;
-          super.start(e);
-          LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
-      }
-  }
-
-  @Override
-  public void stop(CoprocessorEnvironment e) throws IOException {
-    if (this.stopped) {
-      return;
-    }
-    if (this.disabled) {
-        super.stop(e);
-        return;
-      }
-    this.stopped = true;
-    String msg = "Indexer is being stopped";
-    this.builder.stop(msg);
-    this.writer.stop(msg);
-    this.recoveryWriter.stop(msg);
-  }
-
-  @Override
-  public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
-      final WALEdit edit, final boolean writeToWAL) throws IOException {
-      if (this.disabled) {
-          super.prePut(c, put, edit, writeToWAL);
-          return;
-        }
-    // just have to add a batch marker to the WALEdit so we get the edit again in the batch
-    // processing step. We let it throw an exception here because something terrible has happened.
-    edit.add(BATCH_MARKER);
-  }
-
-  @Override
-  public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
-      WALEdit edit, boolean writeToWAL) throws IOException {
-      if (this.disabled) {
-          super.preDelete(e, delete, edit, writeToWAL);
-          return;
-        }
-    try {
-      preDeleteWithExceptions(e, delete, edit, writeToWAL);
-      return;
-    } catch (Throwable t) {
-      rethrowIndexingException(t);
-    }
-    throw new RuntimeException(
-        "Somehow didn't return an index update but also didn't propagate the failure to the client!");
-  }
-
-  public void preDeleteWithExceptions(ObserverContext<RegionCoprocessorEnvironment> e,
-      Delete delete, WALEdit edit, boolean writeToWAL) throws Exception {
-    // if we are making the update as part of a batch, we need to add in a batch marker so the WAL
-    // is retained
-    if (this.builder.getBatchId(delete) != null) {
-      edit.add(BATCH_MARKER);
-      return;
-    }
-
-    // get the mapping for index column -> target index table
-    Collection<Pair<Mutation, byte[]>> indexUpdates = this.builder.getIndexUpdate(delete);
-
-    if (doPre(indexUpdates, edit, writeToWAL)) {
-      takeUpdateLock("delete");
-    }
-  }
-
-  @Override
-  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException {
-      if (this.disabled) {
-          super.preBatchMutate(c, miniBatchOp);
-          return;
-        }
-    try {
-      preBatchMutateWithExceptions(c, miniBatchOp);
-      return;
-    } catch (Throwable t) {
-      rethrowIndexingException(t);
-    }
-    throw new RuntimeException(
-        "Somehow didn't return an index update but also didn't propagate the failure to the client!");
-  }
-
-  @SuppressWarnings("deprecation")
-  public void preBatchMutateWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws Throwable {
-
-    // first group all the updates for a single row into a single update to be processed
-    Map<ImmutableBytesPtr, MultiMutation> mutations =
-        new HashMap<ImmutableBytesPtr, MultiMutation>();
-    boolean durable = false;
-    for (int i = 0; i < miniBatchOp.size(); i++) {
-      // remove the batch keyvalue marker - its added for all puts
-      WALEdit edit = miniBatchOp.getWalEdit(i);
-      // we don't have a WALEdit for immutable index cases, which still see this path
-      // we could check if indexing is enabled for the mutation in prePut and then just skip this
-      // after checking here, but this saves us checking again.
-      if (edit != null) {
-        KeyValue kv = edit.getKeyValues().remove(0);
-        assert kv == BATCH_MARKER : "Expected batch marker from the WALEdit, but got: " + kv;
-      }
-      Pair<Mutation, Integer> op = miniBatchOp.getOperation(i);
-      Mutation m = op.getFirst();
-      // skip this mutation if we aren't enabling indexing
-      // unfortunately, we really should ask if the raw mutation (rather than the combined mutation)
-      // should be indexed, which means we need to expose another method on the builder. Such is
-      // the way optimizations go, though.
-      if (!this.builder.isEnabled(m)) {
-        continue;
-      }
-      
-      // figure out if this batch is durable or not
-      if(!durable){
-        durable = m.getDurability() != Durability.SKIP_WAL;
-      }
-
-      // add the mutation to the batch set
-      ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
-      MultiMutation stored = mutations.get(row);
-      // we haven't seen this row before, so add it
-      if (stored == null) {
-        stored = new MultiMutation(row, m.getWriteToWAL());
-        mutations.put(row, stored);
-      }
-      stored.addAll(m);
-    }
-    
-    // early exit if it turns out we don't have any edits
-    if (mutations.entrySet().size() == 0) {
-      return;
-    }
-
-    // dump all the index updates into a single WALEdit. They will get combined in the end anyway,
-    // so don't worry about which one we get
-    WALEdit edit = miniBatchOp.getWalEdit(0);
-
-    // get the index updates for all elements in this batch
-    Collection<Pair<Mutation, byte[]>> indexUpdates =
-        this.builder.getIndexUpdate(miniBatchOp, mutations.values());
-    // write them
-    if (doPre(indexUpdates, edit, durable)) {
-      takeUpdateLock("batch mutation");
-    }
-  }
-
-  private void takeUpdateLock(String opDesc) throws IndexBuildingFailureException {
-    boolean interrupted = false;
-    // lock the log, so we are sure that the index write gets atomically committed
-    LOG.debug("Taking INDEX_UPDATE readlock for " + opDesc);
-    // wait for the update lock
-    while (!this.stopped) {
-      try {
-        INDEX_UPDATE_LOCK.lockInterruptibly();
-        LOG.debug("Got the INDEX_UPDATE readlock for " + opDesc);
-        // unlock the lock so the server can shutdown, if we find that we have stopped since getting
-        // the lock
-        if (this.stopped) {
-          INDEX_UPDATE_LOCK.unlock();
-          throw new IndexBuildingFailureException(
-              "Found server stop after obtaining the update lock, killing update attempt");
-        }
-        break;
-      } catch (InterruptedException e) {
-        LOG.info("Interrupted while waiting for update lock. Ignoring unless stopped");
-        interrupted = true;
-      }
-    }
-    if (interrupted) {
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  private class MultiMutation extends Mutation {
-
-    private ImmutableBytesPtr rowKey;
-
-    public MultiMutation(ImmutableBytesPtr rowkey, boolean writeToWal) {
-      this.rowKey = rowkey;
-      this.writeToWAL = writeToWal;
-    }
-
-    /**
-     * @param stored
-     */
-    @SuppressWarnings("deprecation")
-    public void addAll(Mutation stored) {
-      // add all the kvs
-      for (Entry<byte[], List<KeyValue>> kvs : stored.getFamilyMap().entrySet()) {
-        byte[] family = kvs.getKey();
-        List<KeyValue> list = getKeyValueList(family, kvs.getValue().size());
-        list.addAll(kvs.getValue());
-        familyMap.put(family, list);
-      }
-
-      // add all the attributes, not overriding already stored ones
-      for (Entry<String, byte[]> attrib : stored.getAttributesMap().entrySet()) {
-        if (this.getAttribute(attrib.getKey()) == null) {
-          this.setAttribute(attrib.getKey(), attrib.getValue());
-        }
-      }
-      if (stored.getWriteToWAL()) {
-        this.writeToWAL = true;
-      }
-    }
-
-    private List<KeyValue> getKeyValueList(byte[] family, int hint) {
-      List<KeyValue> list = familyMap.get(family);
-      if (list == null) {
-        list = new ArrayList<KeyValue>(hint);
-      }
-      return list;
-    }
-
-    @Override
-    public byte[] getRow(){
-      return this.rowKey.copyBytesIfNecessary();
-    }
-
-    @Override
-    public int hashCode() {
-      return this.rowKey.hashCode();
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      return o == null ? false : o.hashCode() == this.hashCode();
-    }
-
-    @Override
-    public void readFields(DataInput arg0) throws IOException {
-      throw new UnsupportedOperationException("MultiMutations cannot be read/written");
-    }
-
-    @Override
-    public void write(DataOutput arg0) throws IOException {
-      throw new UnsupportedOperationException("MultiMutations cannot be read/written");
-    }
-  }
-
-  /**
-   * Add the index updates to the WAL, or write to the index table, if the WAL has been disabled
-   * @return <tt>true</tt> if the WAL has been updated.
-   * @throws IOException
-   */
-  private boolean doPre(Collection<Pair<Mutation, byte[]>> indexUpdates, final WALEdit edit,
-      final boolean writeToWAL) throws IOException {
-    // no index updates, so we are done
-    if (indexUpdates == null || indexUpdates.size() == 0) {
-      return false;
-    }
-
-    // if writing to the WAL is disabled, we never see the WALEdit updates later in the write
-    // path, so do the index update right away
-    if (!writeToWAL) {
-      try {
-        this.writer.write(indexUpdates);
-        return false;
-      } catch (Throwable e) {
-        LOG.error("Failed to update index with entries:" + indexUpdates, e);
-        IndexManagementUtil.rethrowIndexingException(e);
-      }
-    }
-
-    // we have all the WAL durability, so we just update the WAL entry and move on
-    for (Pair<Mutation, byte[]> entry : indexUpdates) {
-      edit.add(new IndexedKeyValue(entry.getSecond(), entry.getFirst()));
-    }
-
-    return true;
-  }
-
-  @Override
-  public void postPut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
-      boolean writeToWAL) throws IOException {
-      if (this.disabled) {
-          super.postPut(e, put, edit, writeToWAL);
-          return;
-        }
-    doPost(edit, put, writeToWAL);
-  }
-
-  @Override
-  public void postDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
-      WALEdit edit, boolean writeToWAL) throws IOException {
-      if (this.disabled) {
-          super.postDelete(e, delete, edit, writeToWAL);
-          return;
-        }
-    doPost(edit,delete, writeToWAL);
-  }
-
-  @Override
-  public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
-      MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException {
-      if (this.disabled) {
-          super.postBatchMutate(c, miniBatchOp);
-          return;
-        }
-    this.builder.batchCompleted(miniBatchOp);
-    // noop for the rest of the indexer - it's handled by the first call to put/delete
-  }
-
-  private void doPost(WALEdit edit, Mutation m, boolean writeToWAL) throws IOException {
-    try {
-      doPostWithExceptions(edit, m, writeToWAL);
-      return;
-    } catch (Throwable e) {
-      rethrowIndexingException(e);
-    }
-    throw new RuntimeException(
-        "Somehow didn't complete the index update, but didn't return succesfully either!");
-  }
-
-  private void doPostWithExceptions(WALEdit edit, Mutation m, boolean writeToWAL) throws Exception {
-    //short circuit, if we don't need to do any work
-    if (!writeToWAL || !this.builder.isEnabled(m)) {
-      // already did the index update in prePut, so we are done
-      return;
-    }
-
-    // there is a little bit of excess here - we iterate all the non-indexed kvs for this check first
-    // and then do it again later when getting out the index updates. This should be pretty minor
-    // though, compared to the rest of the runtime
-    IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
-    /*
-     * early exit - we have nothing to write, so we don't need to do anything else. NOTE: we don't
-     * release the WAL Rolling lock (INDEX_UPDATE_LOCK) since we never take it in doPre if there are
-     * no index updates.
-     */
-    if (ikv == null) {
-      return;
-    }
-
-    /*
-     * only write the update if we haven't already seen this batch. We only want to write the batch
-     * once (this hook gets called with the same WALEdit for each Put/Delete in a batch, which can
-     * lead to writing all the index updates for each Put/Delete).
-     */
-    if (!ikv.getBatchFinished()) {
-      Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
-
-      // the WAL edit is kept in memory and we already specified the factory when we created the
-      // references originally - therefore, we just pass in a null factory here and use the ones
-      // already specified on each reference
-      try {
-          writer.writeAndKillYourselfOnFailure(indexUpdates);
-      } finally {
-        // With a custom kill policy, we may throw instead of kill the server.
-        // Without doing this in a finally block (at least with the mini cluster),
-        // the region server never goes down.
-
-        // mark the batch as having been written. In the single-update case, this never gets checked
-        // again, but in the batch case, we will check it again (see above).
-        ikv.markBatchFinished();
-      
-        // release the lock on the index, we wrote everything properly
-        // we took the lock for each Put/Delete, so we have to release it a matching number of times
-        // batch cases only take the lock once, so we need to make sure we don't over-release the
-        // lock.
-        LOG.debug("Releasing INDEX_UPDATE readlock");
-        INDEX_UPDATE_LOCK.unlock();
-      }
-    }
-  }
-
-  /**
-   * Search the {@link WALEdit} for the first {@link IndexedKeyValue} present
-   * @param edit {@link WALEdit}
-   * @return the first {@link IndexedKeyValue} in the {@link WALEdit} or <tt>null</tt> if not
-   *         present
-   */
-  private IndexedKeyValue getFirstIndexedKeyValue(WALEdit edit) {
-    for (KeyValue kv : edit.getKeyValues()) {
-      if (kv instanceof IndexedKeyValue) {
-        return (IndexedKeyValue) kv;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Extract the index updates from the WAL Edit
-   * @param edit to search for index updates
-   * @return the mutations to apply to the index tables
-   */
-  private Collection<Pair<Mutation, byte[]>> extractIndexUpdate(WALEdit edit) {
-    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
-    for (KeyValue kv : edit.getKeyValues()) {
-      if (kv instanceof IndexedKeyValue) {
-        IndexedKeyValue ikv = (IndexedKeyValue) kv;
-        indexUpdates.add(new Pair<Mutation, byte[]>(ikv.getMutation(), ikv.getIndexTable()));
-      }
-    }
-
-    return indexUpdates;
-  }
-
-  @Override
-  public void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c) {
-    Multimap<HTableInterfaceReference, Mutation> updates = failedIndexEdits.getEdits(c.getEnvironment().getRegion());
-    
-    if (this.disabled) {
-        super.postOpen(c);
-        return;
-      }
-    LOG.info("Found some outstanding index updates that didn't succeed during"
-        + " WAL replay - attempting to replay now.");
-    //if we have no pending edits to complete, then we are done
-    if (updates == null || updates.size() == 0) {
-      return;
-    }
-    
-    // do the usual writer stuff, killing the server again, if we can't manage to make the index
-    // writes succeed again
-    try {
-        writer.writeAndKillYourselfOnFailure(updates);
-    } catch (IOException e) {
-        LOG.error("Exception thrown instead of killing server during index writing", e);
-    }
-  }
-
-  @Override
-  public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env, HRegionInfo info,
-      HLogKey logKey, WALEdit logEdit) throws IOException {
-      if (this.disabled) {
-          super.preWALRestore(env, info, logKey, logEdit);
-          return;
-        }
-    // TODO check the regions in transition. If the server on which the region lives is this one,
-    // then we should retry that write later in postOpen.
-    // we might be able to get even smarter here and pre-split the edits that are server-local
-    // into their own recovered.edits file. This then lets us do a straightforward recovery of each
-    // region (and more efficiently as we aren't writing quite as hectically from this one place).
-
-    /*
-     * Basically, we let the index regions recover for a little while longer before retrying in the
-     * hopes they come up before the primary table finishes.
-     */
-    Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(logEdit);
-    recoveryWriter.writeAndKillYourselfOnFailure(indexUpdates);
-  }
-
-  /**
-   * Create a custom {@link InternalScanner} for a compaction that tracks the versions of rows that
-   * are removed so we can clean them up from the index table(s).
-   * <p>
-   * This is not yet implemented - it's not clear if we should even mess around with the index
-   * table for these rows, as those versions did exist at some point. TODO: v2 of indexing
-   */
-  @Override
-  public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s) throws IOException {
-    return super.preCompactScannerOpen(c, store, scanners, scanType, earliestPutTs, s);
-  }
-
-  /**
-   * Exposed for testing!
-   * @return the currently instantiated index builder
-   */
-  public IndexBuilder getBuilderForTesting() {
-    return this.builder.getBuilderForTesting();
-  }
-
-    /**
-     * Validate that the version and configuration parameters are supported
-     * @param hbaseVersion current version of HBase on which <tt>this</tt> coprocessor is installed
-     * @param conf configuration to check for allowed parameters (e.g. WAL Compression only if >=
-     *            0.94.9)
-     * @return <tt>null</tt> if the version is supported, the error message to display otherwise
-     */
-    public static String validateVersion(String hbaseVersion, Configuration conf) {
-        int encodedVersion = MetaDataUtil.encodeVersion(hbaseVersion);
-        // above 0.94 everything should be supported
-        if (encodedVersion > INDEXING_SUPPORTED_MAJOR_VERSION) {
-            return null;
-        }
-        // check to see if it's at least 0.94
-        if (encodedVersion < INDEXING_SUPPORTED__MIN_MAJOR_VERSION) {
-            return "Indexing not supported for versions older than 0.94.X";
-        }
-        // if less than 0.94.9, we need to check if WAL Compression is enabled
-        if (encodedVersion < INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION) {
-            if (conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false)) {
-                return "Indexing not supported with WAL Compression for versions of HBase older than 0.94.9 - found version:"
-                        + hbaseVersion;
-            }
-        }
-        return null;
-    }
-
-  /**
-   * Enable indexing on the given table
-   * @param desc {@link HTableDescriptor} for the table on which indexing should be enabled
-   * @param builder class to use when building the index for this table
-   * @param properties map of custom configuration options to make available to your
-   *          {@link IndexBuilder} on the server-side
-   * @throws IOException the Indexer coprocessor cannot be added
-   */
-  public static void enableIndexing(HTableDescriptor desc, Class<? extends IndexBuilder> builder,
-      Map<String, String> properties) throws IOException {
-    if (properties == null) {
-      properties = new HashMap<String, String>();
-    }
-    properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
-    desc.addCoprocessor(Indexer.class.getName(), null, Coprocessor.PRIORITY_USER, properties);
-  }
-}
\ No newline at end of file
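
For completeness, a sketch of how a table would be wired up through the enableIndexing helper above (the table name, builder class, and option key are hypothetical):

    HTableDescriptor desc = new HTableDescriptor("my_table");   // hypothetical table
    Map<String, String> props = new HashMap<String, String>();
    props.put("index.builder.custom.option", "value");          // hypothetical builder option
    // registers the Indexer coprocessor and records the builder class under index.builder
    Indexer.enableIndexing(desc, MyIndexBuilder.class, props);  // MyIndexBuilder: hypothetical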

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/ValueGetter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/ValueGetter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/ValueGetter.java
deleted file mode 100644
index b328ff2..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/ValueGetter.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;
-
-public interface ValueGetter {
-
-  /**
-   * Get the most recent (largest timestamp) value for the given column reference
-   * @param ref to match against an underlying key value. Uses the passed object to match the
-   *          keyValue via {@link ColumnReference#matches}
-   * @return the stored value for the given {@link ColumnReference}, or <tt>null</tt> if no value is
-   *         present.
-   * @throws IOException if there is an error accessing the underlying data storage
-   */
-  public ImmutableBytesPtr getLatestValue(ColumnReference ref) throws IOException;
-}
\ No newline at end of file
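
An in-memory implementation sketch of the interface above (not part of this commit; mainly useful in tests), where latest-value semantics collapse to a map lookup:

    import java.util.Map;

    import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
    import org.apache.hadoop.hbase.index.util.ImmutableBytesPtr;

    public class MapValueGetter implements ValueGetter {
      private final Map<ColumnReference, ImmutableBytesPtr> values;

      public MapValueGetter(Map<ColumnReference, ImmutableBytesPtr> values) {
        this.values = values;
      }

      @Override
      public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
        // one version per column in this sketch, so "latest" is just the stored value (or null)
        return values.get(ref);
      }
    }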

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/BaseIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/BaseIndexBuilder.java
deleted file mode 100644
index d369bd8..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/BaseIndexBuilder.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.builder;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.hadoop.hbase.index.covered.CoveredColumnsIndexBuilder;
-
-/**
- * Basic implementation of the {@link IndexBuilder} that doesn't do any actual work of indexing.
- * <p>
- * You should extend this class, rather than implementing IndexBuilder directly, to maintain
- * compatibility going forward.
- * <p>
- * Generally, you should consider using one of the implemented IndexBuilders (e.g.
- * {@link CoveredColumnsIndexBuilder}) as there is a lot of work required to keep an index table
- * up-to-date.
- */
-public abstract class BaseIndexBuilder implements IndexBuilder {
-
-  private static final Log LOG = LogFactory.getLog(BaseIndexBuilder.class);
-  protected boolean stopped;
-
-  @Override
-  public void extendBaseIndexBuilderInstead() { }
-  
-  @Override
-  public void setup(RegionCoprocessorEnvironment conf) throws IOException {
-    // noop
-  }
-
-  @Override
-  public void batchStarted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException {
-    // noop
-  }
-
-  @Override
-  public void batchCompleted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) {
-    // noop
-  }
-  
-  /**
-   * By default, we always attempt to index the mutation. This can be slow (the framework spends
-   * time building index updates only to find they are not needed) and inflexible (you cannot turn
-   * indexing on or off for a table without completely reloading it).
-   * @throws IOException on failure
-   */
-  @Override
-  public boolean isEnabled(Mutation m) throws IOException {
-    return true; 
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * By default, assumes that all mutations should <b>not be batched</b>. That is to say, each
-   * mutation always applies to different rows, even if they are in the same batch, or are
-   * independent updates.
-   */
-  @Override
-  public byte[] getBatchId(Mutation m) {
-    return null;
-  }
-
-  @Override
-  public void stop(String why) {
-    LOG.debug("Stopping because: " + why);
-    this.stopped = true;
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.stopped;
-  }
-}
\ No newline at end of file
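
As the Javadoc above advises, custom builders should extend BaseIndexBuilder rather
than implement IndexBuilder directly. A minimal sketch of such a subclass follows;
the name NoopIndexBuilder is invented for illustration. It inherits the safe
defaults and produces no index updates, making it a reasonable starting template.

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Collections;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.util.Pair;

    import org.apache.hadoop.hbase.index.builder.BaseIndexBuilder;

    /** Hypothetical builder that indexes nothing; inherits the safe defaults. */
    public class NoopIndexBuilder extends BaseIndexBuilder {

      @Override
      public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Mutation mutation)
          throws IOException {
        return Collections.emptyList(); // no index tables to update
      }

      @Override
      public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Delete delete)
          throws IOException {
        return Collections.emptyList();
      }

      @Override
      public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
          Collection<KeyValue> filtered) throws IOException {
        return Collections.emptyList();
      }
    }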

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildManager.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildManager.java
deleted file mode 100644
index d450353..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildManager.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.builder;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.hadoop.hbase.index.Indexer;
-import org.apache.hadoop.hbase.index.parallel.QuickFailingTaskRunner;
-import org.apache.hadoop.hbase.index.parallel.Task;
-import org.apache.hadoop.hbase.index.parallel.TaskBatch;
-import org.apache.hadoop.hbase.index.parallel.ThreadPoolBuilder;
-import org.apache.hadoop.hbase.index.parallel.ThreadPoolManager;
-
-/**
- * Manage the building of index updates from primary table updates.
- * <p>
- * Internally, parallelizes updates through a thread-pool to a delegate index builder. The
- * underlying {@link IndexBuilder} <b>must be thread-safe</b> for each index update.
- */
-public class IndexBuildManager implements Stoppable {
-
-  private static final Log LOG = LogFactory.getLog(IndexBuildManager.class);
-  private final IndexBuilder delegate;
-  private QuickFailingTaskRunner pool;
-  private boolean stopped;
-
-  /**
-   * Set the number of threads with which we can concurrently build index updates. Unused threads
-   * are released, but setting the count too high can cause frequent swapping and resource
-   * contention on the server - <i>tune with care</i>. That said, if building index updates
-   * dominates your write path, tuning this parameter can yield dramatic speedups.
-   */
-  public static final String NUM_CONCURRENT_INDEX_BUILDER_THREADS_CONF_KEY = "index.builder.threads.max";
-  /** Default number of concurrent index builder threads */
-  private static final int DEFAULT_CONCURRENT_INDEX_BUILDER_THREADS = 10;
-  /**
-   * Amount of time (in seconds) to keep idle threads in the pool. After this time we expire the
-   * threads and re-create them as needed, up to the configured max
-   */
-  private static final String INDEX_BUILDER_KEEP_ALIVE_TIME_CONF_KEY =
-      "index.builder.threads.keepalivetime";
-
-  /**
-   * @param env environment in which <tt>this</tt> is running. Used to set up the
-   *          {@link IndexBuilder} and executor
-   * @throws IOException if an {@link IndexBuilder} cannot be correctly set up
-   */
-  public IndexBuildManager(RegionCoprocessorEnvironment env) throws IOException {
-    this(getIndexBuilder(env), new QuickFailingTaskRunner(ThreadPoolManager.getExecutor(
-      getPoolBuilder(env), env)));
-  }
-
-  private static IndexBuilder getIndexBuilder(RegionCoprocessorEnvironment e) throws IOException {
-    Configuration conf = e.getConfiguration();
-    Class<? extends IndexBuilder> builderClass =
-        conf.getClass(Indexer.INDEX_BUILDER_CONF_KEY, null, IndexBuilder.class);
-    try {
-      IndexBuilder builder = builderClass.newInstance();
-      builder.setup(e);
-      return builder;
-    } catch (InstantiationException e1) {
-      throw new IOException("Couldn't instantiate index builder: " + builderClass
-          + ", disabling indexing on table " + e.getRegion().getTableDesc().getNameAsString());
-    } catch (IllegalAccessException e1) {
-      throw new IOException("Couldn't instantiate index builder: " + builderClass
-          + ", disabling indexing on table " + e.getRegion().getTableDesc().getNameAsString());
-    }
-  }
-
-  private static ThreadPoolBuilder getPoolBuilder(RegionCoprocessorEnvironment env) {
-    String serverName = env.getRegionServerServices().getServerName().getServerName();
-    return new ThreadPoolBuilder(serverName + "-index-builder", env.getConfiguration()).
-        setCoreTimeout(INDEX_BUILDER_KEEP_ALIVE_TIME_CONF_KEY).
-        setMaxThread(NUM_CONCURRENT_INDEX_BUILDER_THREADS_CONF_KEY,
-          DEFAULT_CONCURRENT_INDEX_BUILDER_THREADS);
-  }
-
-  public IndexBuildManager(IndexBuilder builder, QuickFailingTaskRunner pool) {
-    this.delegate = builder;
-    this.pool = pool;
-  }
-
-
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdate(
-      MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp,
-      Collection<? extends Mutation> mutations) throws Throwable {
-    // notify the delegate that we have started processing a batch
-    this.delegate.batchStarted(miniBatchOp);
-
-    // parallelize each mutation into its own task
-    // each task is cancelable via two mechanisms: (1) the underlying HRegion is closing (which
-    // would fail lookups/scanning) and (2) stopping this manager via the #stop method. Interrupts
-    // are only acknowledged on each thread before doing the actual lookup; after that it is up to
-    // the underlying builder to check for the closed flag.
-    TaskBatch<Collection<Pair<Mutation, byte[]>>> tasks =
-        new TaskBatch<Collection<Pair<Mutation, byte[]>>>(mutations.size());
-    for (final Mutation m : mutations) {
-      tasks.add(new Task<Collection<Pair<Mutation, byte[]>>>() {
-
-        @Override
-        public Collection<Pair<Mutation, byte[]>> call() throws IOException {
-          return delegate.getIndexUpdate(m);
-        }
-
-      });
-    }
-    List<Collection<Pair<Mutation, byte[]>>> allResults = null;
-    try {
-      allResults = pool.submitUninterruptible(tasks);
-    } catch (CancellationException e) {
-      throw e;
-    } catch (ExecutionException e) {
-      LOG.error("Found a failed index update!");
-      throw e.getCause();
-    }
-
-    // we can only get here if we get successes from each of the tasks, so each of these must have a
-    // correct result
-    Collection<Pair<Mutation, byte[]>> results = new ArrayList<Pair<Mutation, byte[]>>();
-    for (Collection<Pair<Mutation, byte[]>> result : allResults) {
-      assert result != null : "Found an unsuccessful result, but didn't propagate a failure earlier";
-      results.addAll(result);
-    }
-
-    return results;
-  }
-
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Delete delete) throws IOException {
-    // we only get a single update here, so queuing it up would probably just slow things down. It
-    // increases underlying resource contention a little, but the mutation case is far more common,
-    // so let's not worry about it for now.
-    // short-circuit so we don't waste time when indexing is disabled.
-    if (!this.delegate.isEnabled(delete)) {
-      return null;
-    }
-
-    return delegate.getIndexUpdate(delete);
-
-  }
-
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
-      Collection<KeyValue> filtered) throws IOException {
-    // this is run async, so we can take our time here
-    return delegate.getIndexUpdateForFilteredRows(filtered);
-  }
-
-  public void batchCompleted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) {
-    delegate.batchCompleted(miniBatchOp);
-  }
-
-  public void batchStarted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp)
-      throws IOException {
-    delegate.batchStarted(miniBatchOp);
-  }
-
-  public boolean isEnabled(Mutation m) throws IOException {
-    return delegate.isEnabled(m);
-  }
-
-  public byte[] getBatchId(Mutation m) {
-    return delegate.getBatchId(m);
-  }
-
-  @Override
-  public void stop(String why) {
-    if (stopped) {
-      return;
-    }
-    this.stopped = true;
-    this.delegate.stop(why);
-    this.pool.stop(why);
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.stopped;
-  }
-
-  public IndexBuilder getBuilderForTesting() {
-    return this.delegate;
-  }
-}
\ No newline at end of file
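
The two pool knobs defined above ("index.builder.threads.max" and
"index.builder.threads.keepalivetime") are ordinary HBase configuration keys. A
sketch of setting them programmatically; the values shown are illustrative, not
recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class IndexBuilderTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cap the number of concurrent index-builder threads per region server.
        conf.setInt("index.builder.threads.max", 10);
        // Expire idle builder threads after 60 seconds.
        conf.setLong("index.builder.threads.keepalivetime", 60);
        // ... hand conf to the region server or mini-cluster under test
      }
    }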

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuilder.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuilder.java
deleted file mode 100644
index f469482..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuilder.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.builder;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.util.Pair;
-
-import org.apache.hadoop.hbase.index.Indexer;
-
-/**
- * Interface to build updates ({@link Mutation}s) to the index tables, based on the primary table
- * updates.
- * <p>
- * Either all the index updates are applied to all tables, or the hosting region server will kill
- * itself and attempt to replay the index edits through the WAL replay mechanism.
- */
-public interface IndexBuilder extends Stoppable {
-
-  /** Helper method signature to ensure people don't attempt to implement this interface directly */
-  public void extendBaseIndexBuilderInstead();
-
-  /**
-   * This is always called exactly once on install of {@link Indexer}, before any calls to
-   * {@link #getIndexUpdate}.
-   * @param env environment in which the builder is running
-   * @throws IOException on failure to set up the builder
-   */
-  public void setup(RegionCoprocessorEnvironment env) throws IOException;
-
-  /**
-   * Your opportunity to update any/all index tables based on the update of the primary table row.
-   * It's up to your implementation to ensure that timestamps match between the primary and index
-   * tables.
-   * <p>
-   * The mutation is a generic mutation (not a {@link Put} or a {@link Delete}), as it actually
-   * corresponds to a batch update. It's important to note that {@link Put}s always go through the
-   * batch update code path, so a single {@link Put} will come through here and update the primary
-   * table as the only update in the mutation.
-   * <p>
-   * Implementers must ensure that this method is thread-safe - it could (and probably will) be
-   * called concurrently for different mutations, which may or may not be part of the same batch.
-   * @param mutation update to the primary table to be indexed.
-   * @return the index updates to make, as pairs of (mutation, target index table name)
-   * @throws IOException on failure
-   */
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Mutation mutation) throws IOException;
-
-  /**
-   * The counter-part to {@link #getIndexUpdate(Mutation)} - your opportunity to update any/all
-   * index tables based on the delete of the primary table row. This is only called for cases where
-   * the client sends a single delete ({@link HTable#delete}). We separate this method from
-   * {@link #getIndexUpdate(Mutation)} only for the ease of implementation as the delete path has
-   * subtly different semantics for updating the families/timestamps from the generic batch path.
-   * <p>
-   * It's up to your implementation to ensure that timestamps match between the primary and index
-   * tables.
-   * <p>
-   * Implementers must ensure that this method is thread-safe - it could (and probably will) be
-   * called concurrently for different mutations, which may or may not be part of the same batch.
-   * @param delete {@link Delete} to the primary table that may be indexed
-   * @return the index updates to make, as pairs of (mutation, target index table name)
-   * @throws IOException on failure
-   */
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdate(Delete delete) throws IOException;
-
-  /**
-   * Build an index update to cleanup the index when we remove {@link KeyValue}s via the normal
-   * flush or compaction mechanisms.
-   * @param filtered {@link KeyValue}s that previously existed, but won't be included in further
-   *          output from HBase.
-   * @return the index updates to make, as pairs of (mutation, target index table name)
-   * @throws IOException on failure
-   */
-  public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
-      Collection<KeyValue> filtered)
-      throws IOException;
-
-  /**
-   * Notification that a batch of updates has successfully been written.
-   * @param miniBatchOp the full batch operation that was written
-   */
-  public void batchCompleted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp);
-
-  /**
-   * Notification that a batch has been started.
-   * <p>
-   * Unfortunately, the way HBase has the coprocessor hooks set up, this is actually called
-   * <i>after</i> the {@link #getIndexUpdate} methods. Therefore, you will likely need an attribute
-   * on your {@link Put}/{@link Delete} to indicate it is a batch operation.
-   * @param miniBatchOp the full batch operation to be written
-   * @throws IOException on failure
-   */
-  public void batchStarted(MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatchOp) throws IOException;
-
-  /**
-   * This allows the codec to dynamically change whether or not indexing should take place for a
-   * table. If it doesn't take place, we can save a lot of time on the regular Put path. By making
-   * it dynamic, we avoid having to offline and then online a table just to turn indexing on or off.
-   * <p>
-   * We can also be smart about whether to index a given update at all - if the update doesn't
-   * contain any columns that we care about indexing, we can save the effort of analyzing the put
-   * any further.
-   * @param m mutation that should be indexed.
-   * @return <tt>true</tt> if indexing is enabled for the given table. This should be on a per-table
-   *         basis, as each codec is instantiated per-region.
-   * @throws IOException on failure
-   */
-  public boolean isEnabled(Mutation m) throws IOException;
-
-  /**
-   * @param m mutation that has been received by the indexer and is waiting to be indexed
-   * @return the ID of batch to which the Mutation belongs, or <tt>null</tt> if the mutation is not
-   *         part of a batch.
-   */
-  public byte[] getBatchId(Mutation m);
-}
\ No newline at end of file
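
The batchStarted Javadoc above notes that the hook fires after the getIndexUpdate
methods, so batch membership is best carried on the mutation itself. A sketch of
that attribute pattern; the key "idx.batch.id" and the helper class are invented
purely for illustration.

    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;

    public class BatchIdExample {

      private static final String BATCH_ID_ATTRIB = "idx.batch.id"; // hypothetical key

      /** Client side: tag each mutation in a batch with a shared id. */
      public static void tag(Put put, byte[] batchId) {
        put.setAttribute(BATCH_ID_ATTRIB, batchId);
      }

      /** Builder side: back an IndexBuilder#getBatchId implementation. */
      public static byte[] getBatchId(Mutation m) {
        return m.getAttribute(BATCH_ID_ATTRIB); // null => not part of a batch
      }
    }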

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildingFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildingFailureException.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildingFailureException.java
deleted file mode 100644
index 3d0ef14..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/builder/IndexBuildingFailureException.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.builder;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-
-/**
- * Unexpected failure while building index updates that wasn't caused by an {@link IOException}.
- * This should be used if there is some fundamental issue with indexing - one that no amount of
- * retries will fix.
- */
-@SuppressWarnings("serial")
-public class IndexBuildingFailureException extends DoNotRetryIOException {
-
-  /**
-   * Constructor for over the wire propagation. Generally, shouldn't be used since index failure
-   * should have an underlying cause to propagate.
-   * @param msg reason for the failure
-   */
-  public IndexBuildingFailureException(String msg) {
-    super(msg);
-  }
-
-  /**
-   * @param msg reason
-   * @param cause underlying cause for the failure
-   */
-  public IndexBuildingFailureException(String msg, Throwable cause) {
-    super(msg, cause);
-  }
-}
\ No newline at end of file
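
Because this exception extends DoNotRetryIOException, throwing it tells the client
to give up immediately instead of retrying. A sketch of the intended usage inside a
builder; the row-key check is invented purely for illustration.

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Mutation;

    import org.apache.hadoop.hbase.index.builder.IndexBuildingFailureException;

    public class FailFastExample {
      /** Reject a structural problem that no amount of retries can fix. */
      static void checkIndexable(Mutation m) throws IOException {
        if (m.getRow() == null || m.getRow().length == 0) {
          // DoNotRetryIOException subclass: the client will not retry this.
          throw new IndexBuildingFailureException(
              "Mutation has no row key; retrying will not help");
        }
      }
    }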

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/Batch.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/Batch.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/Batch.java
deleted file mode 100644
index 088203e..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/Batch.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hbase.KeyValue;
-
-/**
- * A collection of {@link KeyValue KeyValues} for the primary table, grouped under a single timestamp
- */
-public class Batch {
-
-  private static final long pointDeleteCode = KeyValue.Type.Delete.getCode();
-  private final long timestamp;
-  private List<KeyValue> batch = new ArrayList<KeyValue>();
-  private boolean allPointDeletes = true;
-
-  /**
-   * @param ts timestamp to which all {@link KeyValue}s in this batch belong
-   */
-  public Batch(long ts) {
-    this.timestamp = ts;
-  }
-
-  public void add(KeyValue kv){
-    if (pointDeleteCode != kv.getType()) {
-      allPointDeletes = false;
-    }
-    batch.add(kv);
-  }
-
-  public boolean isAllPointDeletes() {
-    return allPointDeletes;
-  }
-
-  public long getTimestamp() {
-    return this.timestamp;
-  }
-
-  public List<KeyValue> getKvs() {
-    return this.batch;
-  }
-}
\ No newline at end of file
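
Batch groups edits that share a timestamp and tracks whether every edit is a point
delete, letting later stages short-circuit. A quick sketch of its use, assuming the
standard 0.94-era KeyValue(row, family, qualifier, timestamp, type) constructor:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    import org.apache.hadoop.hbase.index.covered.Batch;

    public class BatchExample {
      public static void main(String[] args) {
        long ts = 1000L;
        Batch batch = new Batch(ts);
        // A point delete at the batch timestamp keeps the flag true...
        batch.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
            Bytes.toBytes("qual"), ts, KeyValue.Type.Delete));
        System.out.println(batch.isAllPointDeletes()); // true
        // ...while any non-delete edit flips it to false.
        batch.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
            Bytes.toBytes("qual"), ts, KeyValue.Type.Put));
        System.out.println(batch.isAllPointDeletes()); // false
      }
    }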

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/bbacf6e0/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/CoveredColumns.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/CoveredColumns.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/CoveredColumns.java
deleted file mode 100644
index dffbe4e..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/index/covered/CoveredColumns.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.index.covered;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.index.covered.update.ColumnReference;
-
-/**
- * Manage a set of {@link ColumnReference}s for the {@link LocalTableState}.
- */
-public class CoveredColumns {
-
-  Set<ColumnReference> columns = new HashSet<ColumnReference>();
-
-  public Collection<? extends ColumnReference> findNonCoveredColumns(
-      Collection<? extends ColumnReference> columns2) {
-    List<ColumnReference> uncovered = new ArrayList<ColumnReference>();
-    for (ColumnReference column : columns2) {
-      if (!columns.contains(column)) {
-        uncovered.add(column);
-      }
-    }
-    return uncovered;
-  }
-
-  public void addColumn(ColumnReference column) {
-    this.columns.add(column);
-  }
-}
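
findNonCoveredColumns is a straightforward set difference: it returns the references
that have not yet been covered, so the builder knows which columns still need a
lookup. A brief sketch, assuming ColumnReference's two-argument (family, qualifier)
constructor:

    import java.util.Arrays;
    import java.util.Collection;

    import org.apache.hadoop.hbase.util.Bytes;

    import org.apache.hadoop.hbase.index.covered.CoveredColumns;
    import org.apache.hadoop.hbase.index.covered.update.ColumnReference;

    public class CoveredColumnsExample {
      public static void main(String[] args) {
        CoveredColumns covered = new CoveredColumns();
        ColumnReference a = new ColumnReference(Bytes.toBytes("fam"), Bytes.toBytes("a"));
        ColumnReference b = new ColumnReference(Bytes.toBytes("fam"), Bytes.toBytes("b"));
        covered.addColumn(a);
        // Only b comes back: a is already covered.
        Collection<? extends ColumnReference> missing =
            covered.findNonCoveredColumns(Arrays.asList(a, b));
        System.out.println(missing.size()); // 1
      }
    }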