Posted to commits@hbase.apache.org by ch...@apache.org on 2017/03/23 06:16:02 UTC

[2/2] hbase git commit: HBASE-17809 cleanup unused class

HBASE-17809 cleanup unused class


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fe3c32eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fe3c32eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fe3c32eb

Branch: refs/heads/master
Commit: fe3c32ebd56da9c7852c70951cb9f8fadfdf7719
Parents: f2d1b8d
Author: CHIA-PING TSAI <ch...@gmail.com>
Authored: Mon Mar 20 17:40:28 2017 +0800
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Thu Mar 23 14:15:28 2017 +0800

----------------------------------------------------------------------
 dev-support/findbugs-exclude.xml                |    8 -
 .../client/DelegatingRetryingCallable.java      |   64 -
 .../hbase/client/ScannerTimeoutException.java   |   44 -
 .../hbase/exceptions/LockTimeoutException.java  |   43 -
 .../exceptions/OperationConflictException.java  |   49 -
 .../quotas/InvalidQuotaSettingsException.java   |   32 -
 .../apache/hadoop/hbase/ShareableMemory.java    |   39 -
 .../hadoop/hbase/util/BoundedArrayQueue.java    |   81 --
 .../hadoop/hbase/util/ChecksumFactory.java      |   99 --
 .../hbase/util/TestBoundedArrayQueue.java       |   60 -
 .../encode/tokenize/TokenDepthComparator.java   |   64 -
 .../hadoop/hbase/regionserver/LruHashMap.java   | 1102 ------------------
 .../regionserver/RegionMergeTransaction.java    |  248 ----
 .../org/apache/hadoop/hbase/util/MetaUtils.java |  155 ---
 .../hadoop/hbase/util/SortedCopyOnWriteSet.java |  177 ---
 .../apache/hadoop/hbase/io/TestHeapSize.java    |    2 +-
 .../master/TestDistributedLogSplitting.java     |    3 +-
 .../hadoop/hbase/util/MultiThreadedUpdater.java |    3 +-
 .../hbase/util/TestSortedCopyOnWriteSet.java    |  106 --
 19 files changed, 3 insertions(+), 2376 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 37d5746..3162cb2 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -66,14 +66,6 @@
   </Match>
 
   <Match>
-    <Class name="org.apache.hadoop.hbase.regionserver.LruHashMap"/>
-    <Or>
-      <Method name="equals"/>
-    </Or>
-    <Bug pattern="EQ_UNUSUAL"/>
-  </Match>
-
-  <Match>
     <Class name="org.apache.hadoop.hbase.util.ByteBufferUtils"/>
     <Or>
       <Method name="putInt"/>

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
deleted file mode 100644
index b7d77f3..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Helper callable for internal use when you just want to override a single method of a {@link
- * RetryingCallable}. By default, this just delegates all {@link RetryingCallable} methods to the
- * specified delegate.
- * @param <T> Result class from calls to the delegate {@link RetryingCallable}
- * @param <D> Type of the delegate class
- */
-@InterfaceAudience.Private
-public class DelegatingRetryingCallable<T, D extends RetryingCallable<T>> implements
-    RetryingCallable<T> {
-  protected final D delegate;
-
-  public DelegatingRetryingCallable(D delegate) {
-    this.delegate = delegate;
-  }
-
-  @Override
-  public T call(int callTimeout) throws Exception {
-    return delegate.call(callTimeout);
-  }
-
-  @Override
-  public void prepare(boolean reload) throws IOException {
-    delegate.prepare(reload);
-  }
-
-  @Override
-  public void throwable(Throwable t, boolean retrying) {
-    delegate.throwable(t, retrying);
-  }
-
-  @Override
-  public String getExceptionMessageAdditionalDetail() {
-    return delegate.getExceptionMessageAdditionalDetail();
-  }
-
-  @Override
-  public long sleep(long pause, int tries) {
-    return delegate.sleep(pause, tries);
-  }
-}
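
Note: the deleted helper was meant to be subclassed so a caller could override a single RetryingCallable method while everything else forwarded to the wrapped delegate. A minimal sketch of that pattern (the subclass name and the logging body are illustrative, not from HBase):

  class LoggingRetryingCallable<T> extends DelegatingRetryingCallable<T, RetryingCallable<T>> {
    LoggingRetryingCallable(RetryingCallable<T> delegate) {
      super(delegate);
    }

    @Override
    public void throwable(Throwable t, boolean retrying) {
      // Observe the failure, then forward to the delegate as before.
      System.err.println("call failed (retrying=" + retrying + "): " + t);
      delegate.throwable(t, retrying);
    }
  }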

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
deleted file mode 100644
index 9e0827c..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Thrown when a scanner has timed out.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class ScannerTimeoutException extends DoNotRetryIOException {
-
-  private static final long serialVersionUID = 8788838690290688313L;
-
-  /** default constructor */
-  ScannerTimeoutException() {
-    super();
-  }
-
-  /** @param s the detail message */
-  ScannerTimeoutException(String s) {
-    super(s);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
deleted file mode 100644
index b6b3c32..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.exceptions;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-
-/**
- * Thrown when there is a timeout while trying to acquire a lock.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class LockTimeoutException extends DoNotRetryIOException {
-
-  private static final long serialVersionUID = -1770764924258999825L;
-
-  /** Default constructor */
-  public LockTimeoutException() {
-    super();
-  }
-
-  public LockTimeoutException(String s) {
-    super(s);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
deleted file mode 100644
index c40b8d9..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.exceptions;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * The exception that is thrown on duplicate execution of a non-idempotent operation.
- * The client should not retry; it may use "get" to fetch the desired value.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class OperationConflictException extends DoNotRetryIOException {
-  private static final long serialVersionUID = -8930333627489862872L;
-
-  public OperationConflictException() {
-    super();
-  }
-
-  public OperationConflictException(String message) {
-    super(message);
-  }
-
-  public OperationConflictException(Throwable cause) {
-    super(cause);
-  }
-
-  public OperationConflictException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
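
Note: as the javadoc above says, the intended client handling was to stop retrying and read the current value instead. A hedged sketch of that pattern against the removed class (table, row, family, and qualifier are assumed to be an open org.apache.hadoop.hbase.client.Table and byte[] values; the increment is just one example of a non-idempotent operation):

  try {
    table.increment(new Increment(row).addColumn(family, qualifier, 1L));
  } catch (OperationConflictException e) {
    // The first attempt may already have been applied; fetch the current
    // value instead of re-running the non-idempotent increment.
    Result current = table.get(new Get(row).addColumn(family, qualifier));
  }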

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
deleted file mode 100644
index 54a1545..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.quotas;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Generic exception for invalid quota settings
- */
-@InterfaceAudience.Private
-public class InvalidQuotaSettingsException extends DoNotRetryIOException {
-  public InvalidQuotaSettingsException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
deleted file mode 100644
index 6a6ae59..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A cell implementing this interface is backed by memory that may be part of a larger common
- * memory area used by the RegionServer, such as the bigger memory chunk into which RPC requests
- * are read. If an exclusive instance is required, use {@link #cloneToCell()} to have the
- * contents of the cell copied to an exclusive memory area.
- */
-@InterfaceAudience.Private
-public interface ShareableMemory {
-
-  /**
-   * Does a deep copy of the contents to a new memory area and returns it in the form of a cell.
-   * @return The deep cloned cell
-   */
-  Cell cloneToCell();
-}
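
Note: the contract described in the interface javadoc is that a consumer who wants to hold a cell beyond the lifetime of the shared buffer must deep-copy it first. A minimal sketch of that check-and-clone step (retainCell is a hypothetical helper, not an HBase API):

  static Cell retainCell(Cell cell) {
    // Cells backed by a shared region (e.g. an RPC read buffer) must be
    // copied into exclusive memory before being cached or kept around.
    if (cell instanceof ShareableMemory) {
      return ((ShareableMemory) cell).cloneToCell();
    }
    return cell;
  }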

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
deleted file mode 100644
index 9db4c5c..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.util.AbstractQueue;
-import java.util.Iterator;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A bounded, non-thread-safe implementation of {@link java.util.Queue}.
- */
-@InterfaceAudience.Private
-public class BoundedArrayQueue<E> extends AbstractQueue<E> {
-
-  private Object[] items;
-  private int takeIndex, putIndex;
-  private int count;
-
-  public BoundedArrayQueue(int maxElements) {
-    items =  new Object[maxElements];
-  }
-
-  @Override
-  public int size() {
-    return count;
-  }
-
-  /**
-   * Not implemented and will throw {@link UnsupportedOperationException}
-   */
-  @Override
-  public Iterator<E> iterator() {
-    // We don't need this. Leaving it as not implemented.
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public boolean offer(E e) {
-    if (count == items.length) return false;
-    items[putIndex] = e;
-    if (++putIndex == items.length) putIndex = 0;
-    count++;
-    return true;
-  }
-
-  @Override
-  public E poll() {
-    return (count == 0) ? null : dequeue();
-  }
-
-  @SuppressWarnings("unchecked")
-  private E dequeue() {
-    E x = (E) items[takeIndex];
-    items[takeIndex] = null;
-    if (++takeIndex == items.length) takeIndex = 0;
-    count--;
-    return x;
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public E peek() {
-    return (E) items[takeIndex];
-  }
-}
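
Note: the removed class is a plain ring buffer: offer fails once count reaches capacity, and the put/take indices wrap around the backing array. Code that still needs a bounded queue with this offer/poll/peek contract can use the JDK's ArrayBlockingQueue, which behaves the same way (plus the synchronization the removed class deliberately omitted). A small sketch under that assumption:

  import java.util.Queue;
  import java.util.concurrent.ArrayBlockingQueue;

  public class BoundedQueueDemo {
    public static void main(String[] args) {
      Queue<Integer> q = new ArrayBlockingQueue<>(5); // bounded at five elements
      for (int i = 0; i < 5; i++) {
        q.offer(i);                    // true while below capacity
      }
      System.out.println(q.offer(99)); // false: queue is full
      System.out.println(q.poll());    // 0, FIFO order; indices wrap internally
    }
  }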

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
deleted file mode 100644
index 414832d..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.zip.Checksum;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Utility class that is used to generate a Checksum object.
- * The Checksum implementation is pluggable, and an application
- * can specify its own class that implements its own
- * Checksum algorithm.
- */
-@InterfaceAudience.Private
-public class ChecksumFactory {
-
-  static private final Class<?>[] EMPTY_ARRAY = new Class[]{};
-
-  /**
-   * Create a new instance of a Checksum object.
-   * @param className name of the class implementing {@link Checksum}
-   * @return The newly created Checksum object
-   */
-  static public Checksum newInstance(String className) throws IOException {
-    try {
-      Class<?> clazz = getClassByName(className);
-      return (Checksum)newInstance(clazz);
-    } catch (ClassNotFoundException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Returns a Constructor that can be used to create a Checksum object.
-   * @param className classname for which a constructor is created
-   * @return a new Constructor object
-   */
-  static public Constructor<?> newConstructor(String className)
-    throws IOException {
-    try {
-      Class<?> clazz = getClassByName(className);
-      Constructor<?> ctor = clazz.getDeclaredConstructor(EMPTY_ARRAY);
-      ctor.setAccessible(true);
-      return ctor;
-    } catch (ClassNotFoundException e) {
-      throw new IOException(e);
-    } catch (java.lang.NoSuchMethodException e) {
-      throw new IOException(e);
-    }
-  }
-
- /** Create an object of the given class via its no-argument constructor.
-   *
-   * @param theClass class of which an object is created
-   * @return a new object
-   */
-  static private <T> T newInstance(Class<T> theClass) {
-    T result;
-    try {
-      Constructor<T> ctor = theClass.getDeclaredConstructor(EMPTY_ARRAY);
-      ctor.setAccessible(true);
-      result = ctor.newInstance();
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-    return result;
-  }
-
-  /**
-   * Load a class by name.
-   * @param name the class name.
-   * @return the class object.
-   * @throws ClassNotFoundException if the class is not found.
-   */
-  static private Class<?> getClassByName(String name)
-    throws ClassNotFoundException {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    return Class.forName(name, true, classLoader);
-  }
-}
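
Note: per the javadocs above, the factory loads a Checksum implementation by class name through the thread context class loader and instantiates it via its no-arg constructor. A usage sketch (java.util.zip.CRC32 is a JDK Checksum implementation with a public no-arg constructor, so it satisfies the factory's reflection path):

  // requires: import java.io.IOException; import java.util.zip.Checksum;
  byte[] data = "hbase".getBytes();
  try {
    Checksum crc = ChecksumFactory.newInstance("java.util.zip.CRC32");
    crc.update(data, 0, data.length);
    System.out.println(crc.getValue()); // CRC32 checksum of the input bytes
  } catch (IOException e) {
    // newInstance wraps ClassNotFoundException in an IOException
    throw new RuntimeException(e);
  }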

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
deleted file mode 100644
index 6d9c496..0000000
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ MiscTests.class, SmallTests.class })
-public class TestBoundedArrayQueue {
-
-  private int qMaxElements = 5;
-  private BoundedArrayQueue<Integer> queue = new BoundedArrayQueue<>(qMaxElements);
-
-  @Test
-  public void testBoundedArrayQueueOperations() throws Exception {
-    assertEquals(0, queue.size());
-    assertNull(queue.poll());
-    assertNull(queue.peek());
-    for(int i=0;i<qMaxElements;i++){
-      assertTrue(queue.offer(i));
-    }
-    assertEquals(qMaxElements, queue.size());
-    assertFalse(queue.offer(0));
-    assertEquals(0, queue.peek().intValue());
-    assertEquals(0, queue.peek().intValue());
-    for (int i = 0; i < qMaxElements; i++) {
-      assertEquals(i, queue.poll().intValue());
-    }
-    assertEquals(0, queue.size());
-    assertNull(queue.poll());
-    // Write after one cycle is over
-    assertTrue(queue.offer(100));
-    assertTrue(queue.offer(1000));
-    assertEquals(100, queue.peek().intValue());
-    assertEquals(100, queue.poll().intValue());
-    assertEquals(1000, queue.poll().intValue());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
deleted file mode 100644
index 6ebf20b..0000000
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize;
-
-import java.util.Comparator;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Determines order of nodes in the output array.  May be possible to optimize further.
- */
-@InterfaceAudience.Private
-public class TokenDepthComparator implements Comparator<TokenizerNode> {
-
-  @Override
-  public int compare(TokenizerNode a, TokenizerNode b) {
-    if(a==null){
-      throw new IllegalArgumentException("a cannot be null");
-    }
-    if(b==null){
-      throw new IllegalArgumentException("b cannot be null");
-    }
-
-    // put leaves at the end
-    if (!a.isLeaf() && b.isLeaf()) {
-      return -1;
-    }
-    if (a.isLeaf() && !b.isLeaf()) {
-      return 1;
-    }
-
-    if (a.isLeaf() && b.isLeaf()) { // keep leaves in sorted order (for debuggability)
-      return a.getId() < b.getId() ? -1 : 1;
-    }
-
-    // compare depth
-    if (a.getTokenOffset() < b.getTokenOffset()) {
-      return -1;
-    }
-    if (a.getTokenOffset() > b.getTokenOffset()) {
-      return 1;
-    }
-
-    // if same depth, return lower id first. ids are unique
-    return a.getId() < b.getId() ? -1 : 1;
-  }
-
-}
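
Note: the ordering this comparator encodes (internal nodes first by ascending token offset, then all leaves, with the unique node id breaking every tie) was applied by sorting the tokenizer's node list. One detail worth noting: because ties fall through to a.getId() < b.getId() ? -1 : 1, compare(a, a) returns 1 rather than 0, which technically violates the Comparator contract, though it is harmless when sorting distinct nodes. A minimal usage sketch:

  // requires: import java.util.Collections;
  // nodes: List<TokenizerNode> built by the prefix-tree tokenizer (assumption)
  Collections.sort(nodes, new TokenDepthComparator());
  // After sorting: shallower internal nodes come first, leaves last in id order.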

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
deleted file mode 100644
index a339abf..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ /dev/null
@@ -1,1102 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * The LruHashMap is a memory-aware HashMap with a configurable maximum
- * memory footprint.
- * <p>
- * It maintains an ordered list of all entries in the map, ordered by
- * access time.  When space needs to be freed because the maximum has been
- * reached, or the application has asked to free memory, entries will be
- * evicted according to an LRU (least-recently-used) algorithm.  That is,
- * those entries which have not been accessed the longest will be evicted
- * first.
- * <p>
- * Both the Key and Value Objects used for this class must implement
- * <code>HeapSize</code> in order to track heap usage.
- * <p>
- * This class contains internal synchronization and is thread-safe.
- */
-@InterfaceAudience.Private
-public class LruHashMap<K extends HeapSize, V extends HeapSize>
-implements HeapSize, Map<K,V> {
-
-  private static final Log LOG = LogFactory.getLog(LruHashMap.class);
-
-  /** The default size (in bytes) of the LRU */
-  private static final long DEFAULT_MAX_MEM_USAGE = 50000;
-  /** The default capacity of the hash table */
-  private static final int DEFAULT_INITIAL_CAPACITY = 16;
-  /** The maximum capacity of the hash table */
-  private static final int MAXIMUM_CAPACITY = 1 << 30;
-  /** The default load factor to use */
-  private static final float DEFAULT_LOAD_FACTOR = 0.75f;
-
-  /** Memory overhead of this Object (for HeapSize) */
-  private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG +
-    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE +
-    1 * ClassSize.ARRAY;
-
-  /** Load factor allowed (usually 75%) */
-  private final float loadFactor;
-  /** Number of key/vals in the map */
-  private int size;
-  /** Size at which we grow hash */
-  private int threshold;
-  /** Entries in the map */
-  private Entry [] entries;
-
-  /** Pointer to least recently used entry */
-  private Entry<K,V> headPtr;
-  /** Pointer to most recently used entry */
-  private Entry<K,V> tailPtr;
-
-  /** Maximum memory usage of this map */
-  private long memTotal = 0;
-  /** Amount of available memory */
-  private long memFree = 0;
-
-  /** Number of successful (found) get() calls */
-  private long hitCount = 0;
-  /** Number of unsuccessful (not found) get() calls */
-  private long missCount = 0;
-
-  /**
-   * Constructs a new, empty map with the specified initial capacity,
-   * load factor, and maximum memory usage.
-   *
-   * @param initialCapacity the initial capacity
-   * @param loadFactor the load factor
-   * @param maxMemUsage the maximum total memory usage
-   * @throws IllegalArgumentException if the initial capacity is less than one
-   * @throws IllegalArgumentException if the initial capacity is greater than
-   * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is &lt;= 0
-   * @throws IllegalArgumentException if the max memory usage is too small
-   * to support the base overhead
-   */
-  public LruHashMap(int initialCapacity, float loadFactor,
-  long maxMemUsage) {
-    if (initialCapacity < 1) {
-      throw new IllegalArgumentException("Initial capacity must be > 0");
-    }
-    if (initialCapacity > MAXIMUM_CAPACITY) {
-      throw new IllegalArgumentException("Initial capacity is too large");
-    }
-    if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
-      throw new IllegalArgumentException("Load factor must be > 0");
-    }
-    if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
-      throw new IllegalArgumentException("Max memory usage too small to " +
-      "support base overhead");
-    }
-
-    /** Find a power of 2 >= initialCapacity */
-    int capacity = calculateCapacity(initialCapacity);
-    this.loadFactor = loadFactor;
-    this.threshold = calculateThreshold(capacity,loadFactor);
-    this.entries = new Entry[capacity];
-    this.memFree = maxMemUsage;
-    this.memTotal = maxMemUsage;
-    init();
-  }
-
-  /**
-   * Constructs a new, empty map with the specified initial capacity and
-   * load factor, and default maximum memory usage.
-   *
-   * @param initialCapacity the initial capacity
-   * @param loadFactor the load factor
-   * @throws IllegalArgumentException if the initial capacity is less than one
-   * @throws IllegalArgumentException if the initial capacity is greater than
-   * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is &lt;= 0
-   */
-  public LruHashMap(int initialCapacity, float loadFactor) {
-    this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE);
-  }
-
-  /**
-   * Constructs a new, empty map with the specified initial capacity and
-   * with the default load factor and maximum memory usage.
-   *
-   * @param initialCapacity the initial capacity
-   * @throws IllegalArgumentException if the initial capacity is less than one
-   * @throws IllegalArgumentException if the initial capacity is greater than
-   * the maximum capacity
-   */
-  public LruHashMap(int initialCapacity) {
-    this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_MAX_MEM_USAGE);
-  }
-
-  /**
-   * Constructs a new, empty map with the specified maximum memory usage
-   * and with default initial capacity and load factor.
-   *
-   * @param maxMemUsage the maximum total memory usage
-   * @throws IllegalArgumentException if the max memory usage is too small
-   * to support the base overhead
-   */
-  public LruHashMap(long maxMemUsage) {
-    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
-    maxMemUsage);
-  }
-
-  /**
-   * Constructs a new, empty map with the default initial capacity,
-   * load factor and maximum memory usage.
-   */
-  public LruHashMap() {
-    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
-    DEFAULT_MAX_MEM_USAGE);
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Get the currently available memory for this LRU in bytes.
-   * This is (maxAllowed - currentlyUsed).
-   *
-   * @return currently available bytes
-   */
-  public synchronized long getMemFree() {
-    return memFree;
-  }
-
-  /**
-   * Get the maximum memory allowed for this LRU in bytes.
-   *
-   * @return maximum allowed bytes
-   */
-  public long getMemMax() {
-    return memTotal;
-  }
-
-  /**
-   * Get the currently used memory for this LRU in bytes.
-   *
-   * @return currently used memory in bytes
-   */
-  public long getMemUsed() {
-    return (memTotal - getMemFree()); // FindBugs IS2_INCONSISTENT_SYNC
-  }
-
-  /**
-   * Get the number of hits to the map.  This is the number of times
-   * a call to get() returns a matched key.
-   *
-   * @return number of hits
-   */
-  public long getHitCount() {
-    return hitCount;
-  }
-
-  /**
-   * Get the number of misses to the map.  This is the number of times
-   * a call to get() returns null.
-   *
-   * @return number of misses
-   */
-  public synchronized long getMissCount() {
-    return missCount; // FindBugs IS2_INCONSISTENT_SYNC
-  }
-
-  /**
-   * Get the hit ratio.  This is the number of hits divided by the
-   * total number of requests.
-   *
-   * @return hit ratio (double between 0 and 1)
-   */
-  public double getHitRatio() {
-    return (double)((double)hitCount/
-      ((double)(hitCount + getMissCount())));
-  }
-
-  /**
-   * Free the requested amount of memory from the LRU map.
-   *
-   * This will do LRU eviction from the map until at least as much
-   * memory as requested is freed.  This does not affect the maximum
-   * memory usage parameter.
-   *
-   * @param requestedAmount memory to free from LRU in bytes
-   * @return actual amount of memory freed in bytes
-   */
-  public synchronized long freeMemory(long requestedAmount) throws Exception {
-    if(requestedAmount > (getMemUsed() - getMinimumUsage())) {
-      return clearAll();
-    }
-    long freedMemory = 0;
-    while(freedMemory < requestedAmount) {
-      freedMemory += evictFromLru();
-    }
-    return freedMemory;
-  }
-
-  /**
-   * The total memory usage of this map
-   *
-   * @return memory usage of map in bytes
-   */
-  public long heapSize() {
-    return (memTotal - getMemFree());
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Retrieves the value associated with the specified key.
-   *
-   * If an entry is found, it is updated in the LRU as the most recently
-   * used (last to be evicted) entry in the map.
-   *
-   * @param key the key
-   * @return the associated value, or null if none found
-   * @throws NullPointerException if key is null
-   */
-  public synchronized V get(Object key) {
-    checkKey((K)key);
-    int hash = hash(key);
-    int i = hashIndex(hash, entries.length);
-    Entry<K,V> e = entries[i];
-    while (true) {
-      if (e == null) {
-        missCount++;
-        return null;
-      }
-      if (e.hash == hash && isEqual(key, e.key))  {
-        // Hit!  Update position in LRU
-        hitCount++;
-        updateLru(e);
-        return e.value;
-      }
-      e = e.next;
-    }
-  }
-
-  /**
-   * Insert a key-value mapping into the map.
-   *
-   * Entry will be inserted as the most recently used.
-   *
-   * Both the key and value are required to be Objects and must
-   * implement the HeapSize interface.
-   *
-   * @param key the key
-   * @param value the value
-   * @return the value that was previously mapped to this key, null if none
-   * @throws UnsupportedOperationException if either object does not
-   * implement HeapSize
-   * @throws NullPointerException if the key or value is null
-   */
-  public synchronized V put(K key, V value) {
-    checkKey(key);
-    checkValue(value);
-    int hash = hash(key);
-    int i = hashIndex(hash, entries.length);
-
-    // For old values
-    for (Entry<K,V> e = entries[i]; e != null; e = e.next) {
-      if (e.hash == hash && isEqual(key, e.key)) {
-        V oldValue = e.value;
-        long memChange = e.replaceValue(value);
-        checkAndFreeMemory(memChange);
-        // If replacing an old value for this key, update in LRU
-        updateLru(e);
-        return oldValue;
-      }
-    }
-    long memChange = addEntry(hash, key, value, i);
-    checkAndFreeMemory(memChange);
-    return null;
-  }
-
-  /**
-   * Deletes the mapping for the specified key if it exists.
-   *
-   * @param key the key of the entry to be removed from the map
-   * @return the value associated with the specified key, or null
-   * if no mapping exists.
-   */
-  public synchronized V remove(Object key) {
-    Entry<K,V> e = removeEntryForKey((K)key);
-    if(e == null) return null;
-    // Add freed memory back to available
-    memFree += e.heapSize();
-    return e.value;
-  }
-
-  /**
-   * Gets the size (number of entries) of the map.
-   *
-   * @return size of the map
-   */
-  public int size() {
-    return size;
-  }
-
-  /**
-   * Checks whether the map is currently empty.
-   *
-   * @return true if size of map is zero
-   */
-  public boolean isEmpty() {
-    return size == 0;
-  }
-
-  /**
-   * Clears all entries from the map.
-   *
-   * This frees all entries, tracking memory usage along the way.
-   * All references to entries are removed so they can be GC'd.
-   */
-  public synchronized void clear() {
-    memFree += clearAll();
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Checks whether there is a value in the map for the specified key.
-   *
-   * Does not affect the LRU.
-   *
-   * @param key the key to check
-   * @return true if the map contains a value for this key, false if not
-   * @throws NullPointerException if the key is null
-   */
-  public synchronized boolean containsKey(Object key) {
-    checkKey((K)key);
-    int hash = hash(key);
-    int i = hashIndex(hash, entries.length);
-    Entry e = entries[i];
-    while (e != null) {
-      if (e.hash == hash && isEqual(key, e.key))
-          return true;
-      e = e.next;
-    }
-    return false;
-  }
-
-  /**
-   * Checks whether the map contains the specified value.
-   *
-   * Does not affect the LRU.  This is an inefficient operation.
-   *
-   * @param value the value to check
-   * @return true if the map contains an entry for this value, false
-   * if not
-   * @throws NullPointerException if the value is null
-   */
-  public synchronized boolean containsValue(Object value) {
-    checkValue((V)value);
-    Entry[] tab = entries;
-    for (int i = 0; i < tab.length ; i++)
-      for (Entry e = tab[i] ; e != null ; e = e.next)
-          if (value.equals(e.value))
-            return true;
-    return false;
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Enforces key constraints.  Null keys are not permitted and key must
-   * implement HeapSize.  It should not be necessary to verify the second
-   * constraint because that's enforced on instantiation.
-   *
-   * Can add other constraints in the future.
-   *
-   * @param key the key
-   * @throws NullPointerException if the key is null
-   * @throws UnsupportedOperationException if the key class does not
-   * implement the HeapSize interface
-   */
-  private void checkKey(K key) {
-    if(key == null) {
-      throw new NullPointerException("null keys are not allowed");
-    }
-  }
-
-  /**
-   * Enforces value constraints.  Null values are not permitted and value must
-   * implement HeapSize.  It should not be necessary to verify the second
-   * constraint because that's enforced on instantiation.
-   *
-   * Can add other constraints in the future.
-   *
-   * @param value the value
-   * @throws NullPointerException if the value is null
-   * @throws UnsupportedOperationException if the value class does not
-   * implement the HeapSize interface
-   */
-  private void checkValue(V value) {
-    if(value == null) {
-      throw new NullPointerException("null values are not allowed");
-    }
-  }
-
-  /**
-   * Returns the minimum memory usage of the base map structure.
-   *
-   * @return baseline memory overhead of object in bytes
-   */
-  private long getMinimumUsage() {
-    return OVERHEAD + (entries.length * ClassSize.REFERENCE);
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Evicts and frees based on LRU until at least as much memory as requested
-   * is available.
-   *
-   * @param memNeeded the amount of memory needed in bytes
-   */
-  private void checkAndFreeMemory(long memNeeded) {
-    while(memFree < memNeeded) {
-      evictFromLru();
-    }
-    memFree -= memNeeded;
-  }
-
-  /**
-   * Evicts based on LRU.  This removes all references and updates available
-   * memory.
-   *
-   * @return amount of memory freed in bytes
-   */
-  private long evictFromLru() {
-    long freed = headPtr.heapSize();
-    memFree += freed;
-    removeEntry(headPtr);
-    return freed;
-  }
-
-  /**
-   * Moves the specified entry to the most recently used slot of the
-   * LRU.  This is called whenever an entry is fetched.
-   *
-   * @param e entry that was accessed
-   */
-  private void updateLru(Entry<K,V> e) {
-    Entry<K,V> prev = e.getPrevPtr();
-    Entry<K,V> next = e.getNextPtr();
-    if(next != null) {
-      if(prev != null) {
-        prev.setNextPtr(next);
-        next.setPrevPtr(prev);
-      } else {
-        headPtr = next;
-        headPtr.setPrevPtr(null);
-      }
-      e.setNextPtr(null);
-      e.setPrevPtr(tailPtr);
-      tailPtr.setNextPtr(e);
-      tailPtr = e;
-    }
-  }
-
-  /**
-   * Removes the specified entry from the map and LRU structure.
-   *
-   * @param entry entry to be removed
-   */
-  private void removeEntry(Entry<K,V> entry) {
-    K k = entry.key;
-    int hash = entry.hash;
-    int i = hashIndex(hash, entries.length);
-    Entry<K,V> prev = entries[i];
-    Entry<K,V> e = prev;
-
-    while (e != null) {
-      Entry<K,V> next = e.next;
-      if (e.hash == hash && isEqual(k, e.key)) {
-          size--;
-          if (prev == e) {
-            entries[i] = next;
-          } else {
-            prev.next = next;
-          }
-
-          Entry<K,V> prevPtr = e.getPrevPtr();
-          Entry<K,V> nextPtr = e.getNextPtr();
-
-          if(prevPtr != null && nextPtr != null) {
-            prevPtr.setNextPtr(nextPtr);
-            nextPtr.setPrevPtr(prevPtr);
-          } else if(prevPtr != null) {
-            tailPtr = prevPtr;
-            prevPtr.setNextPtr(null);
-          } else if(nextPtr != null) {
-            headPtr = nextPtr;
-            nextPtr.setPrevPtr(null);
-          }
-
-          return;
-      }
-      prev = e;
-      e = next;
-    }
-  }
-
-  /**
-   * Removes and returns the entry associated with the specified
-   * key.
-   *
-   * @param key key of the entry to be deleted
-   * @return entry that was removed, or null if none found
-   */
-  private Entry<K,V> removeEntryForKey(K key) {
-    int hash = hash(key);
-    int i = hashIndex(hash, entries.length);
-    Entry<K,V> prev = entries[i];
-    Entry<K,V> e = prev;
-
-    while (e != null) {
-      Entry<K,V> next = e.next;
-      if (e.hash == hash && isEqual(key, e.key)) {
-          size--;
-          if (prev == e) {
-            entries[i] = next;
-          } else {
-            prev.next = next;
-          }
-
-          // Updating LRU
-          Entry<K,V> prevPtr = e.getPrevPtr();
-          Entry<K,V> nextPtr = e.getNextPtr();
-          if(prevPtr != null && nextPtr != null) {
-            prevPtr.setNextPtr(nextPtr);
-            nextPtr.setPrevPtr(prevPtr);
-          } else if(prevPtr != null) {
-            tailPtr = prevPtr;
-            prevPtr.setNextPtr(null);
-          } else if(nextPtr != null) {
-            headPtr = nextPtr;
-            nextPtr.setPrevPtr(null);
-          }
-
-          return e;
-      }
-      prev = e;
-      e = next;
-    }
-
-    return e;
-  }
-
- /**
-  * Adds a new entry with the specified key, value, hash code, and
-  * bucket index to the map.
-  *
-  * Also puts it in the bottom (most-recent) slot of the list and
-  * checks to see if we need to grow the array.
-  *
-  * @param hash hash value of key
-  * @param key the key
-  * @param value the value
-  * @param bucketIndex index into hash array to store this entry
-  * @return the amount of heap size used to store the new entry
-  */
-  private long addEntry(int hash, K key, V value, int bucketIndex) {
-    Entry<K,V> e = entries[bucketIndex];
-    Entry<K,V> newE = new Entry<>(hash, key, value, e, tailPtr);
-    entries[bucketIndex] = newE;
-    // add as most recently used in lru
-    if (size == 0) {
-      headPtr = newE;
-      tailPtr = newE;
-    } else {
-      newE.setPrevPtr(tailPtr);
-      tailPtr.setNextPtr(newE);
-      tailPtr = newE;
-    }
-    // Grow table if we are past the threshold now
-    if (size++ >= threshold) {
-      growTable(2 * entries.length);
-    }
-    return newE.heapSize();
-  }
-
-  /**
-   * Clears all the entries in the map.  Tracks the amount of memory being
-   * freed along the way and returns the total.
-   *
-   * Cleans up all references to allow old entries to be GC'd.
-   *
-   * @return total memory freed in bytes
-   */
-  private long clearAll() {
-    Entry cur;
-    long freedMemory = 0;
-    for(int i=0; i<entries.length; i++) {
-      cur = entries[i];
-      while(cur != null) {
-        freedMemory += cur.heapSize();
-        cur = cur.next;
-      }
-      entries[i] = null;
-    }
-    headPtr = null;
-    tailPtr = null;
-    size = 0;
-    return freedMemory;
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Recreates the entire contents of the hashmap into a new array
-   * with double the capacity.  This method is called when the number of
-   * keys in the map reaches the current threshold.
-   *
-   * @param newCapacity the new size of the hash entries
-   */
-  private void growTable(int newCapacity) {
-    Entry [] oldTable = entries;
-    int oldCapacity = oldTable.length;
-
-    // Do not allow growing the table beyond the max capacity
-    if (oldCapacity == MAXIMUM_CAPACITY) {
-      threshold = Integer.MAX_VALUE;
-      return;
-    }
-
-    // Determine how much additional space will be required to grow the array
-    long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE;
-
-    // Verify/enforce we have sufficient memory to grow
-    checkAndFreeMemory(requiredSpace);
-
-    Entry [] newTable = new Entry[newCapacity];
-
-    // Transfer existing entries to new hash table
-    for(int i=0; i < oldCapacity; i++) {
-      Entry<K,V> entry = oldTable[i];
-      if(entry != null) {
-        // Set to null for GC
-        oldTable[i] = null;
-        do {
-          Entry<K,V> next = entry.next;
-          int idx = hashIndex(entry.hash, newCapacity);
-          entry.next = newTable[idx];
-          newTable[idx] = entry;
-          entry = next;
-        } while(entry != null);
-      }
-    }
-
-    entries = newTable;
-    threshold = (int)(newCapacity * loadFactor);
-  }
-
-  /**
-   * Gets the hash code for the specified key.
-   * This implementation uses the additional hashing routine
-   * from JDK 1.4.
-   *
-   * @param key the key to get a hash value for
-   * @return the hash value
-   */
-  private int hash(Object key) {
-    int h = key.hashCode();
-    h += ~(h << 9);
-    h ^=  (h >>> 14);
-    h +=  (h << 4);
-    h ^=  (h >>> 10);
-    return h;
-  }
-
-  /**
-   * Compares two objects for equality.  Method uses equals method and
-   * assumes neither value is null.
-   *
-   * @param x the first value
-   * @param y the second value
-   * @return true if equal
-   */
-  private boolean isEqual(Object x, Object y) {
-    return (x == y || x.equals(y));
-  }
-
-  /**
-   * Determines the index into the current hash table for the specified
-   * hashValue.
-   *
-   * @param hashValue the hash value
-   * @param length the current number of hash buckets
-   * @return the index of the current hash array to use
-   */
-  private int hashIndex(int hashValue, int length) {
-    return hashValue & (length - 1);
-  }
-
-  /**
-   * Calculates the capacity of the array backing the hash
-   * by normalizing capacity to a power of 2 and enforcing
-   * capacity limits.
-   *
-   * @param proposedCapacity the proposed capacity
-   * @return the normalized capacity
-   */
-  private int calculateCapacity(int proposedCapacity) {
-    int newCapacity = 1;
-    if(proposedCapacity > MAXIMUM_CAPACITY) {
-      newCapacity = MAXIMUM_CAPACITY;
-    } else {
-      while(newCapacity < proposedCapacity) {
-        newCapacity <<= 1;
-      }
-      if(newCapacity > MAXIMUM_CAPACITY) {
-        newCapacity = MAXIMUM_CAPACITY;
-      }
-    }
-    return newCapacity;
-  }
-
-  /**
-   * Calculates the threshold of the map given the capacity and load
-   * factor.  Once the number of entries in the map grows to the
-   * threshold we will double the size of the array.
-   *
-   * @param capacity the size of the array
-   * @param factor the load factor of the hash
-   */
-  private int calculateThreshold(int capacity, float factor) {
-    return (int)(capacity * factor);
-  }
-
-  /**
-   * Set the initial heap usage of this class.  Includes class variable
-   * overhead and the entry array.
-   */
-  private void init() {
-    memFree -= OVERHEAD;
-    memFree -= (entries.length * ClassSize.REFERENCE);
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Debugging function that returns a List sorted by access time.
-   *
-   * The order is oldest to newest (first in list is next to be evicted).
-   *
-   * @return Sorted list of entries
-   */
-  public List<Entry<K,V>> entryLruList() {
-    List<Entry<K,V>> entryList = new ArrayList<>();
-    Entry<K,V> entry = headPtr;
-    while(entry != null) {
-      entryList.add(entry);
-      entry = entry.getNextPtr();
-    }
-    return entryList;
-  }
-
-  /**
-   * Debugging function that returns a Set of all entries in the hash table.
-   *
-   * @return Set of entries in hash
-   */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
-      justification="Unused debugging function that reads only")
-  public Set<Entry<K,V>> entryTableSet() {
-    Set<Entry<K,V>> entrySet = new HashSet<>();
-    Entry [] table = entries; // FindBugs IS2_INCONSISTENT_SYNC
-    for(int i=0;i<table.length;i++) {
-      for(Entry e = table[i]; e != null; e = e.next) {
-        entrySet.add(e);
-      }
-    }
-    return entrySet;
-  }
-
-  /**
-   * Get the head of the linked list (least recently used).
-   *
-   * @return head of linked list
-   */
-  public Entry getHeadPtr() {
-    return headPtr;
-  }
-
-  /**
-   * Get the tail of the linked list (most recently used).
-   *
-   * @return tail of linked list
-   */
-  public Entry getTailPtr() {
-    return tailPtr;
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * To best optimize this class, some of the methods that are part of a
-   * Map implementation are not supported.  This is primarily related
-   * to being able to get Sets and Iterators of this map which require
-   * significant overhead and code complexity to support and are
-   * unnecessary for the requirements of this class.
-   */
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public Set<Map.Entry<K,V>> entrySet() {
-    throw new UnsupportedOperationException(
-    "entrySet() is intentionally unimplemented");
-  }
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public boolean equals(Object o) {
-    throw new UnsupportedOperationException(
-    "equals(Object) is intentionally unimplemented");
-  }
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public int hashCode() {
-    throw new UnsupportedOperationException(
-    "hashCode(Object) is intentionally unimplemented");
-  }
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public Set<K> keySet() {
-    throw new UnsupportedOperationException(
-    "keySet() is intentionally unimplemented");
-  }
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public void putAll(Map<? extends K, ? extends V> m) {
-    throw new UnsupportedOperationException(
-    "putAll() is intentionally unimplemented");
-  }
-
-  /**
-   * Intentionally unimplemented.
-   */
-  public Collection<V> values() {
-    throw new UnsupportedOperationException(
-    "values() is intentionally unimplemented");
-  }
-
-  //--------------------------------------------------------------------------
-  /**
-   * Entry to store key/value mappings.
-   * <p>
-   * Contains previous and next pointers for the doubly linked-list which is
-   * used for LRU eviction.
-   * <p>
-   * Instantiations of this class are memory aware.  Both the key and value
-   * classes used must also implement <code>HeapSize</code>.
-   */
-  protected static class Entry<K extends HeapSize, V extends HeapSize>
-  implements Map.Entry<K,V>, HeapSize {
-    /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG +
-      5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT;
-
-    /** The key */
-    protected final K key;
-    /** The value */
-    protected V value;
-    /** The hash value for this entry's key */
-    protected final int hash;
-    /** The next entry in the hash chain (for collisions) */
-    protected Entry<K,V> next;
-
-    /** The previous entry in the LRU list (towards LRU) */
-    protected Entry<K,V> prevPtr;
-    /** The next entry in the LRU list (towards MRU) */
-    protected Entry<K,V> nextPtr;
-
-    /** The precomputed heap size of this entry */
-    protected long heapSize;
-
-    /**
-     * Create a new entry.
-     *
-     * @param h the hash value of the key
-     * @param k the key
-     * @param v the value
-     * @param nextChainPtr the next entry in the hash chain, null if none
-     * @param prevLruPtr the previous entry in the LRU
-     */
-    Entry(int h, K k, V v, Entry<K,V> nextChainPtr, Entry<K,V> prevLruPtr) {
-      value = v;
-      next = nextChainPtr;
-      key = k;
-      hash = h;
-      prevPtr = prevLruPtr;
-      nextPtr = null;
-      // Pre-compute heap size
-      heapSize = OVERHEAD + k.heapSize() + v.heapSize();
-    }
-
-    /**
-     * Get the key of this entry.
-     *
-     * @return the key associated with this entry
-     */
-    public K getKey() {
-      return key;
-    }
-
-    /**
-     * Get the value of this entry.
-     *
-     * @return the value currently associated with this entry
-     */
-    public V getValue() {
-      return value;
-    }
-
-    /**
-     * Set the value of this entry.
-     *
-     * Using this method to change the value is not recommended; prefer
-     * <code>replaceValue</code>, which returns the difference in heap
-     * usage between the previous and current values.
-     *
-     * @param newValue the new value to associate with this entry
-     * @return the value previously associated with this entry
-     */
-    public V setValue(V newValue) {
-      V oldValue = value;
-      value = newValue;
-      return oldValue;
-    }
-
-    /**
-     * Replace the value of this entry.
-     *
-     * Computes and returns the difference in heap size when changing
-     * the value associated with this entry.
-     *
-     * @param newValue the new value to associate with this entry
-     * @return the change in heap usage of this entry in bytes
-     */
-    protected long replaceValue(V newValue) {
-      long sizeDiff = newValue.heapSize() - value.heapSize();
-      value = newValue;
-      heapSize += sizeDiff;
-      return sizeDiff;
-    }
-
-    /**
-     * Returns true if the specified entry has the same key and the
-     * same value as this entry.
-     *
-     * @param o entry to test against current
-     * @return true if entries have equal key and value, false if not
-     */
-    public boolean equals(Object o) {
-      if (!(o instanceof Map.Entry))
-          return false;
-      Map.Entry e = (Map.Entry)o;
-      Object k1 = getKey();
-      Object k2 = e.getKey();
-      if (k1 == k2 || (k1 != null && k1.equals(k2))) {
-          Object v1 = getValue();
-          Object v2 = e.getValue();
-          if (v1 == v2 || (v1 != null && v1.equals(v2)))
-            return true;
-      }
-      return false;
-    }
-
-    /**
-     * Returns the hash code of the entry by xor'ing the hash values
-     * of the key and value of this entry.
-     *
-     * @return hash value of this entry
-     */
-    public int hashCode() {
-      return (key.hashCode() ^ value.hashCode());
-    }
-
-    /**
-     * Returns the String representation of the entry in the form "key=value".
-     *
-     * @return string value of entry
-     */
-    public String toString() {
-      return getKey() + "=" + getValue();
-    }
-
-    //------------------------------------------------------------------------
-    /**
-     * Sets the previous pointer for the entry in the LRU.
-     * @param prevPtr previous entry
-     */
-    protected void setPrevPtr(Entry<K,V> prevPtr){
-      this.prevPtr = prevPtr;
-    }
-
-    /**
-     * Returns the previous pointer for the entry in the LRU.
-     * @return previous entry
-     */
-    protected Entry<K,V> getPrevPtr(){
-      return prevPtr;
-    }
-
-    /**
-     * Sets the next pointer for the entry in the LRU.
-     * @param nextPtr next entry
-     */
-    protected void setNextPtr(Entry<K,V> nextPtr){
-      this.nextPtr = nextPtr;
-    }
-
-    /**
-     * Returns the next pointer for the entry in the LRU.
-     * @return next entry
-     */
-    protected Entry<K,V> getNextPtr(){
-      return nextPtr;
-    }
-
-    /**
-     * Returns the pre-computed and "deep" size of the Entry
-     * @return size of the entry in bytes
-     */
-    public long heapSize() {
-      return heapSize;
-    }
-  }
-}
-
-
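
The removed LruHashMap combined a hash map with a doubly linked LRU list and
per-entry heap accounting. Where only the access-ordered eviction is wanted,
the JDK's LinkedHashMap provides it directly; a minimal sketch follows. The
class name and entry bound are illustrative assumptions, and note that this
bounds entry count, not aggregate heap bytes as LruHashMap did.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class BoundedLruMap<K, V> extends LinkedHashMap<K, V> {
      private final int maxEntries;

      public BoundedLruMap(int maxEntries) {
        // accessOrder=true moves an entry to the tail on each get/put,
        // giving the same oldest-to-newest ordering LruHashMap maintained.
        super(16, 0.75f, true);
        this.maxEntries = maxEntries;
      }

      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // Evict the least recently used entry once the bound is exceeded.
        return size() > maxEntries;
      }
    }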

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
deleted file mode 100644
index 4a3f52f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.security.User;
-
-/**
- * Executes region merge as a "transaction". It is similar to
- * SplitTransaction. Call {@link #prepare(RegionServerServices)} to set up the
- * transaction, {@link #execute(Server, RegionServerServices)} to run the
- * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if
- * execute fails.
- * 
- * <p>Here is an example of how you would use this interface:
- * <pre>
- *  RegionMergeTransactionFactory factory = new RegionMergeTransactionFactory(conf);
- *  RegionMergeTransaction mt = factory.create(parent, midKey)
- *    .registerTransactionListener(new TransactionListener() {
- *       public void transition(RegionMergeTransaction transaction,
- *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) throws IOException {
- *         // ...
- *       }
- *       public void rollback(RegionMergeTransaction transaction,
- *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
- *         // ...
- *       }
- *    });
- *  if (!mt.prepare()) return;
- *  try {
- *    mt.execute(server, services);
- *  } catch (IOException ioe) {
- *    try {
- *      mt.rollback(server, services);
- *      return;
- *    } catch (RuntimeException e) {
- *      // abort the server
- *    }
- *  }
- * </pre>
- * <p>A merge transaction is not thread safe.  Callers must ensure a merge is run by
- * one thread only.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public interface RegionMergeTransaction {
-  /**
-   * Each enum is a step in the merge transaction.
-   */
-  enum RegionMergeTransactionPhase {
-    STARTED,
-    /**
-     * Prepared
-     */
-    PREPARED,
-    /**
-     * Set region as in transition, set it into MERGING state.
-     */
-    SET_MERGING,
-    /**
-     * We created the temporary merge data directory.
-     */
-    CREATED_MERGE_DIR,
-    /**
-     * Closed the merging region A.
-     */
-    CLOSED_REGION_A,
-    /**
-     * The merging region A has been taken out of the server's online regions list.
-     */
-    OFFLINED_REGION_A,
-    /**
-     * Closed the merging region B.
-     */
-    CLOSED_REGION_B,
-    /**
-     * The merging region B has been taken out of the server's online regions list.
-     */
-    OFFLINED_REGION_B,
-    /**
-     * Started in on creation of the merged region.
-     */
-    STARTED_MERGED_REGION_CREATION,
-    /**
-     * Point of no return. If we got here, then transaction is not recoverable
-     * other than by crashing out the regionserver.
-     */
-    PONR,
-    /**
-     * Completed
-     */
-    COMPLETED
-  }
-
-  /**
-   * Merge transaction journal entry
-   */
-  public interface JournalEntry {
-
-    /** @return the completed phase marked by this journal entry */
-    RegionMergeTransactionPhase getPhase();
-
-    /** @return the time of phase completion */
-    long getTimeStamp();
-  }
-
-  /**
-   * Split transaction listener
-   */
-  public interface TransactionListener {
-
-    /**
-     * Invoked when transitioning forward from one transaction phase to another
-     * @param transaction the transaction
-     * @param from the current phase
-     * @param to the next phase
-     * @throws IOException listener can throw this to abort
-     */
-    void transition(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
-        RegionMergeTransactionPhase to) throws IOException;
-
-    /**
-     * Invoked when rolling back a transaction from one transaction phase to the
-     * previous
-     * @param transaction the transaction
-     * @param from the current phase
-     * @param to the previous phase
-     */
-    void rollback(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
-        RegionMergeTransactionPhase to);
-  }
-
-  /**
-   * Check merge inputs and prepare the transaction.
-   * @param services the region server services
-   * @return <code>true</code> if the regions are mergeable, or
-   *         <code>false</code> if they are not (e.g. a region is already closed).
-   * @throws IOException if the preparation check fails
-   */
-  boolean prepare(RegionServerServices services) throws IOException;
-
-  /**
-   * Run the transaction.
-   * @param server Hosting server instance. Can be null when testing
-   * @param services Used to online/offline regions.
-   * @return merged region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link #rollback(Server, RegionServerServices)}
-   * @see #rollback(Server, RegionServerServices)
-   * @deprecated use #execute(Server, RegionServerServices, User)
-   */
-  @Deprecated
-  Region execute(Server server, RegionServerServices services) throws IOException;
-
-  /**
-   * Run the transaction.
-   * @param server Hosting server instance. Can be null when testing
-   * @param services Used to online/offline regions.
-   * @param user the effective user
-   * @return merged region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link #rollback(Server, RegionServerServices)}
-   * @see #rollback(Server, RegionServerServices, User)
-   */
-  Region execute(Server server, RegionServerServices services, User user) throws IOException;
-
-  /**
-   * Roll back a failed transaction
-   * @param server Hosting server instance (May be null when testing).
-   * @param services Services of regionserver, used to online regions.
-   * @throws IOException If thrown, rollback failed. Take drastic action.
-   * @return True if we successfully rolled back, false if we got to the point
-   *         of no return and so now need to abort the server to minimize
-   *         damage.
-   * @deprecated use #rollback(Server, RegionServerServices, User)
-   */
-  @Deprecated
-  boolean rollback(Server server, RegionServerServices services) throws IOException;
-
-  /**
-   * Roll back a failed transaction
-   * @param server Hosting server instance (May be null when testing).
-   * @param services Services of regionserver, used to online regions.
-   * @param user the effective user
-   * @throws IOException If thrown, rollback failed. Take drastic action.
-   * @return True if we successfully rolled back, false if we got to the point
-   *         of no return and so now need to abort the server to minimize
-   *         damage.
-   */
-  boolean rollback(Server server, RegionServerServices services, User user) throws IOException;
-
-  /**
-   * Register a listener for transaction preparation, execution, and possibly
-   * rollback phases.
-   * <p>A listener can abort a transaction by throwing an exception. 
-   * @param listener the listener
-   * @return 'this' for chaining
-   */
-  RegionMergeTransaction registerTransactionListener(TransactionListener listener);
-
-  /** @return merged region info */
-  HRegionInfo getMergedRegionInfo();
-
-  /**
-   * Get the journal for the transaction.
-   * <p>Journal entries are an opaque type represented as JournalEntry. They can
-   * also provide useful debugging information via their toString method.
-   * @return the transaction journal
-   */
-  List<JournalEntry> getJournal();
-
-  /**
-   * Get the Server running the transaction or rollback
-   * @return server instance
-   */
-  Server getServer();
-
-  /**
-   * Get the RegionServerServices of the server running the transaction or rollback
-   * @return region server services
-   */
-  RegionServerServices getRegionServerServices();
-}
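
The interface's own javadoc above already shows the execute/rollback flow; to
complement it, here is a minimal sketch of a TransactionListener that only
logs phase transitions. The class name is hypothetical, and throwing from
transition() is how a listener would abort the transaction.

    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.TransactionListener;

    public class LoggingMergeListener implements TransactionListener {
      private static final Log LOG = LogFactory.getLog(LoggingMergeListener.class);

      @Override
      public void transition(RegionMergeTransaction transaction,
          RegionMergeTransactionPhase from, RegionMergeTransactionPhase to)
          throws IOException {
        // Throwing an IOException here would abort the transaction.
        LOG.debug("merge phase " + from + " -> " + to);
      }

      @Override
      public void rollback(RegionMergeTransaction transaction,
          RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
        LOG.debug("merge rollback " + from + " -> " + to);
      }
    }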

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
deleted file mode 100644
index 7c89f11..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALFactory;
-
-/**
- * Contains utility methods for manipulating HBase meta tables.
- * Be sure to call {@link #shutdown()} when done with this class so it closes
- * resources opened during meta processing (ROOT, META, etc.).  Use this
- * class with care; in particular, when used during migrations, check
- * carefully whether migration is actually needed.
- */
-@InterfaceAudience.Private
-public class MetaUtils {
-  private static final Log LOG = LogFactory.getLog(MetaUtils.class);
-  private final Configuration conf;
-  private final FSTableDescriptors descriptors;
-  private FileSystem fs;
-  private WALFactory walFactory;
-  private HRegion metaRegion;
-  private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
-    new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));
-
-  /** Default constructor
-   * @throws IOException e
-   */
-  public MetaUtils() throws IOException {
-    this(HBaseConfiguration.create());
-  }
-
-  /**
-   * @param conf Configuration
-   * @throws IOException e
-   */
-  public MetaUtils(Configuration conf) throws IOException {
-    this.conf = conf;
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
-    this.metaRegion = null;
-    this.descriptors = new FSTableDescriptors(conf);
-    initialize();
-  }
-
-  /**
-   * Initializes the filesystem handle used for meta processing.
-   * @throws IOException e
-   */
-  private void initialize() throws IOException {
-    this.fs = FileSystem.get(this.conf);
-  }
-
-  /**
-   * @return the WAL associated with the given region
-   * @throws IOException e
-   */
-  public synchronized WAL getLog(HRegionInfo info) throws IOException {
-    if (this.walFactory == null) {
-      String logName = 
-          HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
-      final Configuration walConf = new Configuration(this.conf);
-      FSUtils.setRootDir(walConf, fs.getHomeDirectory());
-      this.walFactory = new WALFactory(walConf, null, logName);
-    }
-    final byte[] region = info.getEncodedNameAsBytes();
-    final byte[] namespace = info.getTable().getNamespace();
-    return info.isMetaRegion() ? walFactory.getMetaWAL(region) : walFactory.getWAL(region,
-      namespace);
-  }
-
-  /**
-   * @return HRegion for meta region
-   * @throws IOException e
-   */
-  public synchronized HRegion getMetaRegion() throws IOException {
-    return this.metaRegion == null? openMetaRegion(): this.metaRegion;
-  }
-
-  /**
-   * Closes catalog regions if open. Also closes and deletes the WAL. You
-   * must call this method if you want to persist changes made during a
-   * MetaUtils edit session.
-   */
-  public synchronized void shutdown() {
-    if (this.metaRegion != null) {
-      try {
-        this.metaRegion.close();
-      } catch (IOException e) {
-        LOG.error("closing meta region", e);
-      } finally {
-        this.metaRegion = null;
-      }
-    }
-    try {
-      for (HRegion r: metaRegions.values()) {
-        LOG.info("CLOSING hbase:meta " + r.toString());
-        r.close();
-      }
-    } catch (IOException e) {
-      LOG.error("closing meta region", e);
-    } finally {
-      metaRegions.clear();
-    }
-    try {
-      if (this.walFactory != null) {
-        this.walFactory.close();
-      }
-    } catch (IOException e) {
-      LOG.error("closing WAL", e);
-    }
-  }
-
-  private synchronized HRegion openMetaRegion() throws IOException {
-    if (this.metaRegion != null) {
-      return this.metaRegion;
-    }
-    this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      descriptors.get(TableName.META_TABLE_NAME), getLog(HRegionInfo.FIRST_META_REGIONINFO),
-      this.conf);
-    this.metaRegion.compactStores();
-    return this.metaRegion;
-  }
-}
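
For reference, typical use of the removed class followed an
open-edit-shutdown pattern against the API above; a minimal sketch, where the
wrapper class, method name, and configuration are illustrative assumptions:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.MetaUtils;

    public class MetaEditExample {
      void editMetaOffline() throws IOException {
        MetaUtils utils = new MetaUtils(HBaseConfiguration.create());
        try {
          HRegion meta = utils.getMetaRegion();
          // ... perform offline edits against the meta region ...
        } finally {
          // Required: closes the meta region and the temporary WAL so that
          // edits made during the session are persisted.
          utils.shutdown();
        }
      }
    }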

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
deleted file mode 100644
index 05e0f49..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Simple {@link java.util.SortedSet} implementation that uses an internal
- * {@link java.util.TreeSet} to provide ordering. All mutation operations
- * create a new copy of the <code>TreeSet</code> instance, so they are very
- * expensive.  This class is only intended for use on small, very rarely
- * written collections that expect highly concurrent reads. Read operations
- * are performed on a reference to the internal <code>TreeSet</code> at the
- * time of invocation, so will not see any mutations to the collection during
- * their operation.
- *
- * <p>Note that due to the use of a {@link java.util.TreeSet} internally,
- * a {@link java.util.Comparator} instance must be provided, or collection
- * elements must implement {@link java.lang.Comparable}.
- * </p>
- * @param <E> A class implementing {@link java.lang.Comparable} or able to be
- * compared by a provided comparator.
- */
-@InterfaceAudience.Private
-public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
-  private volatile SortedSet<E> internalSet;
-
-  public SortedCopyOnWriteSet() {
-    this.internalSet = new TreeSet<>();
-  }
-
-  public SortedCopyOnWriteSet(Collection<? extends E> c) {
-    this.internalSet = new TreeSet<>(c);
-  }
-
-  public SortedCopyOnWriteSet(Comparator<? super E> comparator) {
-    this.internalSet = new TreeSet<>(comparator);
-  }
-
-  @Override
-  public int size() {
-    return internalSet.size();
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return internalSet.isEmpty();
-  }
-
-  @Override
-  public boolean contains(Object o) {
-    return internalSet.contains(o);
-  }
-
-  @Override
-  public Iterator<E> iterator() {
-    return internalSet.iterator();
-  }
-
-  @Override
-  public Object[] toArray() {
-    return internalSet.toArray();
-  }
-
-  @Override
-  public <T> T[] toArray(T[] a) {
-    return internalSet.toArray(a);
-  }
-
-  @Override
-  public synchronized boolean add(E e) {
-    SortedSet<E> newSet = new TreeSet<>(internalSet);
-    boolean added = newSet.add(e);
-    internalSet = newSet;
-    return added;
-  }
-
-  @Override
-  public synchronized boolean remove(Object o) {
-    SortedSet<E> newSet = new TreeSet<>(internalSet);
-    boolean removed = newSet.remove(o);
-    internalSet = newSet;
-    return removed;
-  }
-
-  @Override
-  public boolean containsAll(Collection<?> c) {
-    return internalSet.containsAll(c);
-  }
-
-  @Override
-  public synchronized boolean addAll(Collection<? extends E> c) {
-    SortedSet<E> newSet = new TreeSet<>(internalSet);
-    boolean changed = newSet.addAll(c);
-    internalSet = newSet;
-    return changed;
-  }
-
-  @Override
-  public synchronized boolean retainAll(Collection<?> c) {
-    SortedSet<E> newSet = new TreeSet<>(internalSet);
-    boolean changed = newSet.retainAll(c);
-    internalSet = newSet;
-    return changed;
-  }
-
-  @Override
-  public synchronized boolean removeAll(Collection<?> c) {
-    SortedSet<E> newSet = new TreeSet<>(internalSet);
-    boolean changed = newSet.removeAll(c);
-    internalSet = newSet;
-    return changed;
-  }
-
-  @Override
-  public synchronized void clear() {
-    Comparator<? super E> comparator = internalSet.comparator();
-    if (comparator != null) {
-      internalSet = new TreeSet<>(comparator);
-    } else {
-      internalSet = new TreeSet<>();
-    }
-  }
-
-  @Override
-  public Comparator<? super E> comparator() {
-    return internalSet.comparator();
-  }
-
-  @Override
-  public SortedSet<E> subSet(E fromElement, E toElement) {
-    return internalSet.subSet(fromElement, toElement);
-  }
-
-  @Override
-  public SortedSet<E> headSet(E toElement) {
-    return internalSet.headSet(toElement);
-  }
-
-  @Override
-  public SortedSet<E> tailSet(E fromElement) {
-    return internalSet.tailSet(fromElement);
-  }
-
-  @Override
-  public E first() {
-    return internalSet.first();
-  }
-
-  @Override
-  public E last() {
-    return internalSet.last();
-  }
-}
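
Callers that need a sorted set with cheap concurrent reads can use the JDK's
ConcurrentSkipListSet instead of the removed copy-on-write implementation; a
minimal sketch with illustrative values follows. Note its iterators are
weakly consistent rather than point-in-time snapshots, a weaker guarantee
than the removed class offered.

    import java.util.concurrent.ConcurrentSkipListSet;

    public class SkipListSetExample {
      public static void main(String[] args) {
        ConcurrentSkipListSet<String> set = new ConcurrentSkipListSet<>();
        set.add("beta");
        set.add("alpha");  // mutations are lock-free, no full-copy cost
        System.out.println(set.first());  // prints "alpha" (natural ordering)
      }
    }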

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 2f33859..6b943a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -65,7 +65,7 @@ public class TestHeapSize  {
   private static final Log LOG = LogFactory.getLog(TestHeapSize.class);
   // List of classes implementing HeapSize
   // BatchOperation, BatchUpdate, BlockIndex, Entry, Entry<K,V>, HStoreKey
-  // KeyValue, LruBlockCache, LruHashMap<K,V>, Put, WALKey
+  // KeyValue, LruBlockCache, Put, WALKey
 
   @BeforeClass
   public static void beforeClass() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 4c8728f..b78bfd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -82,7 +82,6 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
@@ -396,7 +395,7 @@ public class TestDistributedLogSplitting {
         try {
           ht.increment(incr);
           fail("should have thrown");
-        } catch (OperationConflictException ope) {
+        } catch (IOException ope) {
           LOG.debug("Caught as expected: " + ope.getMessage());
         }
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index 1505fc1..f41a5cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.apache.hadoop.util.StringUtils;
@@ -297,7 +296,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
         }
         totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
       } catch (IOException e) {
-        if (ignoreNonceConflicts && (e instanceof OperationConflictException)) {
+        if (ignoreNonceConflicts) {
           LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
           totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
           return;