You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by sy...@apache.org on 2016/02/02 06:49:36 UTC

[1/6] hbase git commit: HBASE-14810 Update Hadoop support description to explain "not tested" vs "not supported"

Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 0de221a19 -> fc5e698c0


HBASE-14810 Update Hadoop support description to explain "not tested" vs "not supported"


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9cd48712
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9cd48712
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9cd48712

Branch: refs/heads/hbase-12439
Commit: 9cd487129d5a0048216ff00ef15fdb8effc525ae
Parents: 0de221a
Author: Misty Stanley-Jones <ms...@cloudera.com>
Authored: Thu Jan 28 09:54:21 2016 -0800
Committer: Misty Stanley-Jones <ms...@cloudera.com>
Committed: Fri Jan 29 14:34:22 2016 -0800

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/configuration.adoc  |  4 ++-
 .../asciidoc/_chapters/getting_started.adoc     |  1 +
 src/main/asciidoc/_chapters/preface.adoc        | 35 ++++++++++++++++++++
 src/main/asciidoc/_chapters/upgrading.adoc      |  2 +-
 4 files changed, 40 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9cd48712/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index 495232f..8e71cea 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -28,7 +28,9 @@
 :experimental:
 
 This chapter expands upon the <<getting_started>> chapter to further explain configuration of Apache HBase.
-Please read this chapter carefully, especially the <<basic.prerequisites,Basic Prerequisites>> to ensure that your HBase testing and deployment goes smoothly, and prevent data loss.
+Please read this chapter carefully, especially the <<basic.prerequisites,Basic Prerequisites>>
+to ensure that your HBase testing and deployment goes smoothly, and prevent data loss.
+Familiarize yourself with <<hbase_supported_tested_definitions>> as well.
 
 == Configuration Files
 Apache HBase uses the same configuration system as Apache Hadoop.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9cd48712/src/main/asciidoc/_chapters/getting_started.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc
index 1b38e6e..7ef91b0 100644
--- a/src/main/asciidoc/_chapters/getting_started.adoc
+++ b/src/main/asciidoc/_chapters/getting_started.adoc
@@ -19,6 +19,7 @@
  */
 ////
 
+[[getting_started]]
 = Getting Started
 :doctype: book
 :numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/9cd48712/src/main/asciidoc/_chapters/preface.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/preface.adoc b/src/main/asciidoc/_chapters/preface.adoc
index 50df7ff..877508c 100644
--- a/src/main/asciidoc/_chapters/preface.adoc
+++ b/src/main/asciidoc/_chapters/preface.adoc
@@ -70,4 +70,39 @@ Please use link:https://issues.apache.org/jira/browse/hbase[JIRA] to report non-
 
 To protect existing HBase installations from new vulnerabilities, please *do not* use JIRA to report security-related bugs. Instead, send your report to the mailing list private@apache.org, which allows anyone to send messages, but restricts who can read them. Someone on that list will contact you to follow up on your report.
 
+[[hbase_supported_tested_definitions]]
+.Support and Testing Expectations
+
+The phrases /supported/, /not supported/, /tested/, and /not tested/ occur several
+places throughout this guide. In the interest of clarity, here is a brief explanation
+of what is generally meant by these phrases, in the context of HBase.
+
+NOTE: Commercial technical support for Apache HBase is provided by many Hadoop vendors.
+This is not the sense in which the term /support/ is used in the context of the
+Apache HBase project. The Apache HBase team assumes no responsibility for your
+HBase clusters, your configuration, or your data.
+
+Supported::
+  In the context of Apache HBase, /supported/ means that HBase is designed to work
+  in the way described, and deviation from the defined behavior or functionality should
+  be reported as a bug.
+
+Not Supported::
+  In the context of Apache HBase, /not supported/ means that a use case or use pattern
+  is not expected to work and should be considered an antipattern. If you think this
+  designation should be reconsidered for a given feature or use pattern, file a JIRA
+  or start a discussion on one of the mailing lists.
+
+Tested::
+  In the context of Apache HBase, /tested/ means that a feature is covered by unit
+  or integration tests, and has been proven to work as expected.
+
+Not Tested::
+  In the context of Apache HBase, /not tested/ means that a feature or use pattern
+  may or may not work in a given way, and may or may not corrupt your data or cause
+  operational issues. It is an unknown, and there are no guarantees. If you can provide
+  proof that a feature designated as /not tested/ does work in a given way, please
+  submit the tests and/or the metrics so that other users can gain certainty about
+  such features or use patterns.
+
 :numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/9cd48712/src/main/asciidoc/_chapters/upgrading.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc
index 6327c5a..d731542 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -31,7 +31,7 @@ You cannot skip major versions when upgrading. If you are upgrading from version
 
 NOTE: It may be possible to skip across versions -- for example go from 0.92.2 straight to 0.98.0 just following the 0.96.x upgrade instructions -- but these scenarios are untested.
 
-Review <<configuration>>, in particular <<hadoop>>.
+Review <<configuration>>, in particular <<hadoop>>. Familiarize yourself with <<hbase_supported_tested_definitions>>.
 
 [[hbase.versioning]]
 == HBase version number and compatibility


[6/6] hbase git commit: HBASE-14969 Add throughput controller for flush; ADDENDUM

Posted by sy...@apache.org.
HBASE-14969 Add throughput controller for flush; ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc5e698c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc5e698c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc5e698c

Branch: refs/heads/hbase-12439
Commit: fc5e698c0a4e9613ab9eddcfc2a2684424806c56
Parents: 2cc48e0
Author: stack <st...@apache.org>
Authored: Mon Feb 1 08:08:07 2016 -0800
Committer: stack <st...@apache.org>
Committed: Mon Feb 1 08:08:39 2016 -0800

----------------------------------------------------------------------
 .../throttle/TestFlushWithThroughputController.java         | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc5e698c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
index 5d5be87..8908c71 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
@@ -168,9 +168,6 @@ public class TestFlushWithThroughputController {
     conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,
       3000);
     TEST_UTIL.startMiniCluster(1);
-    assertEquals(10L * 1024 * 1024,
-      ((PressureAwareThroughputController) TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
-          .getFlushThroughputController()).getMaxThroughput(), EPSILON);
     Connection conn = ConnectionFactory.createConnection(conf);
     try {
       HTableDescriptor htd = new HTableDescriptor(tableName);
@@ -181,6 +178,12 @@ public class TestFlushWithThroughputController {
       HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
       PressureAwareFlushThroughputController throughputController =
           (PressureAwareFlushThroughputController) regionServer.getFlushThroughputController();
+      for (Region region : regionServer.getOnlineRegions()) {
+        region.flush(true);
+      }
+      assertEquals(0.0, regionServer.getFlushPressure(), EPSILON);
+      Thread.sleep(5000);
+      assertEquals(10L * 1024 * 1024, throughputController.getMaxThroughput(), EPSILON);
       Table table = conn.getTable(tableName);
       Random rand = new Random();
       for (int i = 0; i < 10; i++) {


[3/6] hbase git commit: HBASE-15195 Don't run findbugs on hbase-it; it has nothing in src/main/java

Posted by sy...@apache.org.
HBASE-15195 Don't run findbugs on hbase-it; it has nothing in src/main/java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99551189
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99551189
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99551189

Branch: refs/heads/hbase-12439
Commit: 9955118995d4358432d7a7adbacbc687988d19da
Parents: 9ec408e
Author: stack <st...@apache.org>
Authored: Sun Jan 31 11:07:21 2016 -0400
Committer: stack <st...@apache.org>
Committed: Sun Jan 31 11:07:21 2016 -0400

----------------------------------------------------------------------
 dev-support/hbase-personality.sh | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/99551189/dev-support/hbase-personality.sh
----------------------------------------------------------------------
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 2d31fd5..d2e94e3 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -78,9 +78,12 @@ function personality_modules
 
   if [[ ${testtype} = findbugs ]]; then
     for module in ${CHANGED_MODULES}; do
-      # skip findbugs on hbase-shell
+      # skip findbugs on hbase-shell and hbase-it. hbase-it has nothing
+      # in src/main/java where findbugs goes to look
       if [[ ${module} == hbase-shell ]]; then
         continue
+      elif [[ ${module} == hbase-it ]]; then
+        continue
       else
         # shellcheck disable=SC2086
         personality_enqueue_module ${module} ${extra}


[5/6] hbase git commit: HBASE-15158 HBASE-15158 Preamble 1 of 2: fix findbugs, add javadoc, change Region#getReadpoint to #getReadPoint, and some util

Posted by sy...@apache.org.
HBASE-15158 HBASE-15158 Preamble 1 of 2: fix findbugs, add javadoc, change Region#getReadpoint to #getReadPoint, and some util


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cc48e03
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cc48e03
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cc48e03

Branch: refs/heads/hbase-12439
Commit: 2cc48e039d1f800832ac8880bbc820982e0ac8a5
Parents: 13a46df
Author: stack <st...@apache.org>
Authored: Sun Jan 31 20:21:48 2016 -0800
Committer: stack <st...@apache.org>
Committed: Sun Jan 31 20:21:48 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Scan.java    |  2 +
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  6 +-
 .../org/apache/hadoop/hbase/HConstants.java     |  5 --
 .../java/org/apache/hadoop/hbase/TagUtil.java   | 69 +++++++++++++++++++-
 .../example/ZooKeeperScanPolicyObserver.java    |  5 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |  6 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |  3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 27 +++++---
 .../hadoop/hbase/mapreduce/CellCounter.java     | 63 +++++++++---------
 .../hadoop/hbase/master/AssignmentManager.java  |  3 +-
 .../hbase/regionserver/DefaultMemStore.java     |  9 +--
 .../hadoop/hbase/regionserver/HRegion.java      | 44 +++++++------
 .../hadoop/hbase/regionserver/HStore.java       |  7 +-
 .../hbase/regionserver/KeyValueScanner.java     |  7 ++
 .../hadoop/hbase/regionserver/Region.java       | 27 +++++---
 .../hadoop/hbase/regionserver/RowProcessor.java | 10 +--
 .../hadoop/hbase/regionserver/StoreScanner.java |  6 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   | 16 ++++-
 .../hbase/regionserver/wal/FSWALEntry.java      |  2 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  2 +-
 .../hadoop/hbase/util/RegionSplitter.java       |  3 +-
 .../TestRegionObserverScannerOpenHook.java      |  2 +-
 .../regionserver/NoOpScanPolicyObserver.java    |  4 +-
 .../hbase/util/TestCoprocessorScanPolicy.java   |  2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |  2 +-
 25 files changed, 219 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index fe9745e..1892f54 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -227,6 +227,7 @@ public class Scan extends Query {
     filter = scan.getFilter(); // clone?
     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
     consistency = scan.getConsistency();
+    this.setIsolationLevel(scan.getIsolationLevel());
     reversed = scan.isReversed();
     asyncPrefetch = scan.isAsyncPrefetch();
     small = scan.isSmall();
@@ -271,6 +272,7 @@ public class Scan extends Query {
     this.getScan = true;
     this.asyncPrefetch = false;
     this.consistency = get.getConsistency();
+    this.setIsolationLevel(get.getIsolationLevel());
     for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
       setAttribute(attr.getKey(), attr.getValue());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 7db1c76..7242791 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -842,7 +842,7 @@ public final class CellUtil {
     final int tagsLength = cell.getTagsLength();
     // Save an object allocation where we can
     if (tagsLength == 0) {
-      return EMPTY_TAGS_ITR;
+      return TagUtil.EMPTY_TAGS_ITR;
     }
     if (cell instanceof ByteBufferedCell) {
       return tagsIterator(((ByteBufferedCell) cell).getTagsByteBuffer(),
@@ -1388,7 +1388,7 @@ public final class CellUtil {
 
   /**
    * Compares the row of two keyvalues for equality
-   * 
+   *
    * @param left
    * @param right
    * @return True if rows match.
@@ -2307,4 +2307,4 @@ public final class CellUtil {
       return Type.DeleteFamily.getCode();
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1b71cb4..4e07e6a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1012,11 +1012,6 @@ public final class HConstants {
 
   public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";
 
-  /**
-   * The byte array represents for NO_NEXT_INDEXED_KEY;
-   * The actual value is irrelevant because this is always compared by reference.
-   */
-  public static final Cell NO_NEXT_INDEXED_KEY = new KeyValue();
   /** delimiter used between portions of a region name */
   public static final int DELIMITER = ',';
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
index 15ddfc8..65f0cad 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -104,7 +105,7 @@ public final class TagUtil {
    * @return the serialized tag data as bytes
    */
   public static byte[] fromList(List<Tag> tags) {
-    if (tags.isEmpty()) {
+    if (tags == null || tags.isEmpty()) {
       return HConstants.EMPTY_BYTE_ARRAY;
     }
     int length = 0;
@@ -216,4 +217,70 @@ public final class TagUtil {
     }
     return StreamUtils.readRawVarint32(tag.getValueByteBuffer(), offset);
   }
+
+  /**
+   * @return A List&lt;Tag&gt; of any Tags found in <code>cell</code> else null.
+   */
+  public static List<Tag> carryForwardTags(final Cell cell) {
+    return carryForwardTags(null, cell);
+  }
+
+  /**
+   * Add to <code>tagsOrNull</code> any Tags <code>cell</code> is carrying or null if none.
+   */
+  public static List<Tag> carryForwardTags(final List<Tag> tagsOrNull, final Cell cell) {
+    Iterator<Tag> itr = CellUtil.tagsIterator(cell);
+    if (itr == EMPTY_TAGS_ITR) {
+      // If no Tags, return early.
+      return tagsOrNull;
+    }
+    List<Tag> tags = tagsOrNull;
+    if (tags == null) {
+      tags = new ArrayList<Tag>();
+    }
+    while (itr.hasNext()) {
+      tags.add(itr.next());
+    }
+    return tags;
+  }
+
+  /**
+   * @return Carry forward the TTL tag.
+   */
+  public static List<Tag> carryForwardTTLTag(final List<Tag> tagsOrNull, final long ttl) {
+    if (ttl == Long.MAX_VALUE) {
+      return tagsOrNull;
+    }
+    List<Tag> tags = tagsOrNull;
+    // If we are making the array in here, given we are the last thing checked, we'll be only thing
+    // in the array so set its size to '1' (I saw this being done in earlier version of
+    // tag-handling).
+    if (tags == null) {
+      tags = new ArrayList<Tag>(1);
+    }
+    tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl)));
+    return tags;
+  }
+
+  /**
+   * Iterator returned when no Tags. Used by CellUtil too.
+   */
+  static final Iterator<Tag> EMPTY_TAGS_ITR = new Iterator<Tag>() {
+    @Override
+    public boolean hasNext() {
+      return false;
+    }
+
+    @Override
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IT_NO_SUCH_ELEMENT",
+      justification="Intentional")
+    public Tag next() {
+      return null;
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  };
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 420799f..48d7a55 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -33,10 +33,9 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
@@ -232,6 +231,6 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
       return null;
     }
     return new StoreScanner(store, scanInfo, scan, targetCols,
-      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
+      ((HStore)store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 1bdba3b..9f29f97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -34,18 +34,18 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue;
-import org.apache.hadoop.hbase.ByteBufferedKeyOnlyKeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.ObjectIntPair;
@@ -289,7 +289,7 @@ public class HFileBlockIndex {
       if (rootLevelIndex < blockKeys.length - 1) {
         nextIndexedKey = blockKeys[rootLevelIndex + 1];
       } else {
-        nextIndexedKey = HConstants.NO_NEXT_INDEXED_KEY;
+        nextIndexedKey = KeyValueScanner.NO_NEXT_INDEXED_KEY;
       }
 
       int lookupLevel = 1; // How many levels deep we are in our lookup.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 4db26d1..a873280 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -788,7 +789,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         } else {
           // The comparison with no_next_index_key has to be checked
           if (this.nextIndexedKey != null &&
-              (this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY || reader
+              (this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || reader
               .getComparator().compareKeyIgnoresMvcc(key, nextIndexedKey) < 0)) {
             // The reader shall continue to scan the current data block instead
             // of querying the

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 922174d..a9c64a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -57,7 +57,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -67,10 +66,8 @@ import javax.security.sasl.SaslServer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CallQueueTooBigException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CallQueueTooBigException;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -79,13 +76,15 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Operation;
 import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
 import org.apache.hadoop.hbase.io.BoundedByteBufferPool;
+import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -101,11 +100,11 @@ import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.AuthMethod;
 import org.apache.hadoop.hbase.security.HBasePolicyProvider;
 import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
 import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
 import org.apache.hadoop.hbase.security.SaslStatus;
 import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -126,8 +125,8 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.apache.htrace.TraceInfo;
+import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.BlockingService;
@@ -1918,11 +1917,21 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       data = null;
       if (!channel.isOpen())
         return;
-      try {socket.shutdownOutput();} catch(Exception ignored) {} // FindBugs DE_MIGHT_IGNORE
+      try {socket.shutdownOutput();} catch(Exception ignored) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Ignored exception", ignored);
+        }
+      }
       if (channel.isOpen()) {
         try {channel.close();} catch(Exception ignored) {}
       }
-      try {socket.close();} catch(Exception ignored) {}
+      try {
+        socket.close();
+      } catch(Exception ignored) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Ignored exception", ignored);
+        }
+      }
     }
 
     private UserGroupInformation createUser(ConnectionHeader head) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index 0675b73..00f197c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -120,39 +120,40 @@ public class CellCounter extends Configured implements Tool {
       try {
         context.getCounter(Counters.ROWS).increment(1);
         context.write(new Text("Total ROWS"), new IntWritable(1));
-
-        for (Cell value : values.listCells()) {
-          currentRowKey = Bytes.toStringBinary(CellUtil.cloneRow(value));
-          String thisRowFamilyName = Bytes.toStringBinary(CellUtil.cloneFamily(value));
-          if (!thisRowFamilyName.equals(currentFamilyName)) {
-            currentFamilyName = thisRowFamilyName;
-            context.getCounter("CF", thisRowFamilyName).increment(1);
-            if (1 == context.getCounter("CF", thisRowFamilyName).getValue()) {
-              context.write(new Text("Total Families Across all Rows"), new IntWritable(1));
-              context.write(new Text(thisRowFamilyName), new IntWritable(1));
+        if (values != null && !values.isEmpty()) {
+          for (Cell value : values.listCells()) {
+            currentRowKey = Bytes.toStringBinary(CellUtil.cloneRow(value));
+            String thisRowFamilyName = Bytes.toStringBinary(CellUtil.cloneFamily(value));
+            if (!thisRowFamilyName.equals(currentFamilyName)) {
+              currentFamilyName = thisRowFamilyName;
+              context.getCounter("CF", thisRowFamilyName).increment(1);
+              if (1 == context.getCounter("CF", thisRowFamilyName).getValue()) {
+                context.write(new Text("Total Families Across all Rows"), new IntWritable(1));
+                context.write(new Text(thisRowFamilyName), new IntWritable(1));
+              }
             }
-          }
-          String thisRowQualifierName = thisRowFamilyName + separator
-              + Bytes.toStringBinary(CellUtil.cloneQualifier(value));
-          if (!thisRowQualifierName.equals(currentQualifierName)) {
-            currentQualifierName = thisRowQualifierName;
-            context.getCounter("CFQL", thisRowQualifierName).increment(1);
-            context.write(new Text("Total Qualifiers across all Rows"),
-              new IntWritable(1));
-            context.write(new Text(thisRowQualifierName), new IntWritable(1));
-            // Intialize versions
-            context.getCounter("QL_VERSIONS", currentRowKey + separator +
-              thisRowQualifierName).increment(1);
-            context.write(new Text(currentRowKey + separator
-                + thisRowQualifierName + "_Versions"), new IntWritable(1));
+            String thisRowQualifierName = thisRowFamilyName + separator
+                + Bytes.toStringBinary(CellUtil.cloneQualifier(value));
+            if (!thisRowQualifierName.equals(currentQualifierName)) {
+              currentQualifierName = thisRowQualifierName;
+              context.getCounter("CFQL", thisRowQualifierName).increment(1);
+              context.write(new Text("Total Qualifiers across all Rows"),
+                  new IntWritable(1));
+              context.write(new Text(thisRowQualifierName), new IntWritable(1));
+              // Intialize versions
+              context.getCounter("QL_VERSIONS", currentRowKey + separator +
+                  thisRowQualifierName).increment(1);
+              context.write(new Text(currentRowKey + separator
+                  + thisRowQualifierName + "_Versions"), new IntWritable(1));
 
-          } else {
-            // Increment versions
-            currentQualifierName = thisRowQualifierName;
-            context.getCounter("QL_VERSIONS", currentRowKey + separator +
-              thisRowQualifierName).increment(1);
-            context.write(new Text(currentRowKey + separator
-                + thisRowQualifierName + "_Versions"), new IntWritable(1));
+            } else {
+              // Increment versions
+              currentQualifierName = thisRowQualifierName;
+              context.getCounter("QL_VERSIONS", currentRowKey + separator +
+                  thisRowQualifierName).increment(1);
+              context.write(new Text(currentRowKey + separator
+                  + thisRowQualifierName + "_Versions"), new IntWritable(1));
+            }
           }
         }
       } catch (InterruptedException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index b455828..7639004 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -1798,7 +1798,8 @@ public class AssignmentManager {
         invokeUnAssign(regionInfo);
         break;
       default:
-        // No process for other states
+          // No process for other states
+          break;
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 89ae0d1..2984754 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -454,10 +454,6 @@ public class DefaultMemStore implements MemStore {
    * value for that row/family/qualifier.  If a KeyValue did already exist,
    * it will then be removed.
    * <p>
-   * Currently the memstoreTS is kept at 0 so as each insert happens, it will
-   * be immediately visible.  May want to change this so it is atomic across
-   * all KeyValues.
-   * <p>
    * This is called under row lock, so Get operations will still see updates
    * atomically.  Scans will only see each KeyValue update as atomic.
    *
@@ -484,8 +480,7 @@ public class DefaultMemStore implements MemStore {
    * family, and qualifier, they are removed.
    * <p>
    * Callers must hold the read lock.
-   *
-   * @param cell
+   * @param readpoint Smallest outstanding readpoint; below which we can remove duplicate Cells.
    * @return change in size of MemStore
    */
   private long upsert(Cell cell, long readpoint) {
@@ -505,7 +500,7 @@ public class DefaultMemStore implements MemStore {
         cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
     SortedSet<Cell> ss = cellSet.tailSet(firstCell);
     Iterator<Cell> it = ss.iterator();
-    // versions visible to oldest scanner
+    // Versions visible to oldest scanner.
     int versionsVisible = 0;
     while ( it.hasNext() ) {
       Cell cur = it.next();

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 05ef2ad..c93123c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1263,28 +1263,34 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }
 
-   public MultiVersionConcurrencyControl getMVCC() {
-     return mvcc;
-   }
+  @VisibleForTesting
+  public MultiVersionConcurrencyControl getMVCC() {
+    return mvcc;
+  }
 
-   @Override
-   public long getMaxFlushedSeqId() {
-     return maxFlushedSeqId;
-   }
+  @Override
+  public long getMaxFlushedSeqId() {
+    return maxFlushedSeqId;
+  }
 
-   @Override
-   public long getReadpoint(IsolationLevel isolationLevel) {
-     if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-       // This scan can read even uncommitted transactions
-       return Long.MAX_VALUE;
-     }
-     return mvcc.getReadPoint();
-   }
+  @Override
+  public long getReadPoint(IsolationLevel isolationLevel) {
+    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+      // This scan can read even uncommitted transactions
+      return Long.MAX_VALUE;
+    }
+    return mvcc.getReadPoint();
+  }
 
-   @Override
-   public boolean isLoadingCfsOnDemandDefault() {
-     return this.isLoadingCfsOnDemandDefault;
-   }
+  @Override
+  public long getReadpoint(IsolationLevel isolationLevel) {
+    return getReadPoint(isolationLevel);
+  }
+
+  @Override
+  public boolean isLoadingCfsOnDemandDefault() {
+    return this.isLoadingCfsOnDemandDefault;
+  }
 
   /**
    * Close down this HRegion.  Flush the cache, shut down each HStore, don't

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 7f90e17..c65326a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1304,8 +1304,11 @@ public class HStore implements Store {
     HRegionInfo info = this.region.getRegionInfo();
     CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
         family.getName(), inputPaths, outputPaths, fs.getStoreDir(getFamily().getNameAsString()));
-    WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(),
-        this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
+    // Fix reaching into Region to get the maxWaitForSeqId.
+    // Does this method belong in Region altogether given it is making so many references up there?
+    // Could be Region#writeCompactionMarker(compactionDescriptor);
+    WALUtil.writeCompactionMarker(this.region.getWAL(), this.region.getTableDesc(),
+        this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC());
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
index a9322e3..eae713f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 
 /**
@@ -30,6 +31,12 @@ import org.apache.hadoop.hbase.client.Scan;
 @InterfaceAudience.Private
 public interface KeyValueScanner extends Shipper {
   /**
+   * The byte array represents for NO_NEXT_INDEXED_KEY;
+   * The actual value is irrelevant because this is always compared by reference.
+   */
+  public static final Cell NO_NEXT_INDEXED_KEY = new KeyValue();
+
+  /**
    * Look at the next Cell in this scanner, but do not iterate scanner.
    * @return the next Cell
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 5da8bcb..c0bc8fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -157,6 +157,13 @@ public interface Region extends ConfigurationObserver {
   boolean isLoadingCfsOnDemandDefault();
 
   /** @return readpoint considering given IsolationLevel */
+  long getReadPoint(IsolationLevel isolationLevel);
+
+  /**
+   * @return readpoint considering given IsolationLevel
+   * @deprecated Since 1.2.0. Use {@link #getReadPoint(IsolationLevel)} instead.
+   */
+  @Deprecated
   long getReadpoint(IsolationLevel isolationLevel);
 
   /**
@@ -217,8 +224,8 @@ public interface Region extends ConfigurationObserver {
   // Region read locks
 
   /**
-   * Operation enum is used in {@link Region#startRegionOperation} to provide context for
-   * various checks before any region operation begins.
+   * Operation enum is used in {@link Region#startRegionOperation} and elsewhere to provide
+   * context for various checks.
    */
   enum Operation {
     ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE,
@@ -323,9 +330,10 @@ public interface Region extends ConfigurationObserver {
    OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException;
 
   /**
-   * Atomically checks if a row/family/qualifier value matches the expected val
-   * If it does, it performs the row mutations.  If the passed value is null, t
-   * is for the lack of column (ie: non-existence)
+   * Atomically checks if a row/family/qualifier value matches the expected value and if it does,
+   * it performs the mutation. If the passed value is null, the lack of column value
+   * (ie: non-existence) is used. See checkAndRowMutate to do many checkAndPuts at a time on a
+   * single row.
    * @param row to check
    * @param family column family to check
    * @param qualifier column qualifier to check
@@ -340,9 +348,10 @@ public interface Region extends ConfigurationObserver {
       ByteArrayComparable comparator, Mutation mutation, boolean writeToWAL) throws IOException;
 
   /**
-   * Atomically checks if a row/family/qualifier value matches the expected val
-   * If it does, it performs the row mutations.  If the passed value is null, t
-   * is for the lack of column (ie: non-existence)
+   * Atomically checks if a row/family/qualifier value matches the expected values and if it does,
+   * it performs the row mutations. If the passed value is null, the lack of column value
+   * (ie: non-existence) is used. Use to do many mutations on a single row. Use checkAndMutate
+   * to do one checkAndMutate at a time.
    * @param row to check
    * @param family column family to check
    * @param qualifier column qualifier to check
@@ -350,7 +359,7 @@ public interface Region extends ConfigurationObserver {
    * @param comparator
    * @param mutations
    * @param writeToWAL
-   * @return true if mutation was applied, false otherwise
+   * @return true if mutations were applied, false otherwise
    * @throws IOException
    */
   boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp,

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java
index cfe42e4..34901b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java
@@ -35,7 +35,7 @@ import com.google.protobuf.Message;
  * Defines the procedure to atomically perform multiple scans and mutations
  * on a HRegion.
  *
- * This is invoked by HRegion#processRowsWithLocks().
+ * This is invoked by {@link Region#processRowsWithLocks(RowProcessor)}.
  * This class performs scans and generates mutations and WAL edits.
  * The locks and MVCC will be handled by HRegion.
  *
@@ -98,10 +98,8 @@ public interface RowProcessor<S extends Message, T extends Message> {
 
   /**
    * The hook to be executed after the process() but before applying the Mutations to region. Also
-   * by the time this hook is been called, mvcc transaction is started.
-   * @param region
+   * by the time this hook is called, mvcc transaction have started.
    * @param walEdit the output WAL edits to apply to write ahead log
-   * @throws IOException
    */
   void preBatchMutate(HRegion region, WALEdit walEdit) throws IOException;
 
@@ -109,8 +107,6 @@ public interface RowProcessor<S extends Message, T extends Message> {
    * The hook to be executed after the process() and applying the Mutations to region. The
    * difference of this one with {@link #postProcess(HRegion, WALEdit, boolean)} is this hook will
    * be executed before the mvcc transaction completion.
-   * @param region
-   * @throws IOException
    */
   void postBatchMutate(HRegion region) throws IOException;
 
@@ -156,4 +152,4 @@ public interface RowProcessor<S extends Message, T extends Message> {
    * @return The {@link Durability} to use
    */
   Durability useDurability();
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index c4c509f..2f0d284 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -260,7 +260,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
       long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
     this(store, scan, scanInfo, null,
-      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED), false);
+      ((HStore)store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false);
     if (dropDeletesFromRow == null) {
       matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
           earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost());
@@ -659,7 +659,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     case SEEK_NEXT_COL:
     {
       Cell nextIndexedKey = getNextIndexedKey();
-      if (nextIndexedKey != null && nextIndexedKey != HConstants.NO_NEXT_INDEXED_KEY
+      if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY
           && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
         return qcode == MatchCode.SEEK_NEXT_COL ? MatchCode.SKIP : MatchCode.INCLUDE;
       }
@@ -669,7 +669,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     case SEEK_NEXT_ROW:
     {
       Cell nextIndexedKey = getNextIndexedKey();
-      if (nextIndexedKey != null && nextIndexedKey != HConstants.NO_NEXT_INDEXED_KEY
+      if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY
           && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
         return qcode == MatchCode.SEEK_NEXT_ROW ? MatchCode.SKIP : MatchCode.INCLUDE;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 9ae72e6..47e28b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1,4 +1,5 @@
 /**
+
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -281,8 +282,6 @@ public class FSHLog implements WAL {
 
   private final int slowSyncNs;
 
-  private final static Object [] NO_ARGS = new Object []{};
-
   // If live datanode count is lower than the default replicas value,
   // RollWriter will be triggered in each sync(So the RollWriter will be
   // triggered one by one in a short time). Using it as a workaround to slow
@@ -1069,6 +1068,19 @@ public class FSHLog implements WAL {
     }
   }
 
+  /**
+   * NOTE: This append, at a time that is usually after this call returns, starts an
+   * mvcc transaction by calling 'begin' wherein which we assign this update a sequenceid. At
+   * assignment time, we stamp all the passed in Cells inside WALEdit with their sequenceId.
+   * You must 'complete' the transaction this mvcc transaction by calling
+   * MultiVersionConcurrencyControl#complete(...) or a variant otherwise mvcc will get stuck. Do it
+   * in the finally of a try/finally
+   * block within which this append lives and any subsequent operations like sync or
+   * update of memstore, etc. Get the WriteEntry to pass mvcc out of the passed in WALKey
+   * <code>walKey</code> parameter. Be warned that the WriteEntry is not immediately available
+   * on return from this method. It WILL be available subsequent to a sync of this append;
+   * otherwise, you will just have to wait on the WriteEntry to get filled in.
+   */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH_EXCEPTION",
       justification="Will never be null")
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 7f3eb61..5fe2061 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -106,6 +106,8 @@ class FSWALEntry extends Entry {
 
   /**
    * Here is where a WAL edit gets its sequenceid.
+   * SIDE-EFFECT is our stamping the sequenceid into every Cell AND setting the sequenceid into the
+   * MVCC WriteEntry!!!!
    * @return The sequenceid we stamped on this edit.
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index bd5ca9d..a9113ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1454,7 +1454,7 @@ public class HBaseFsck extends Configured implements Closeable {
         "You may need to restore the previously sidelined hbase:meta");
       return false;
     }
-    meta.batchMutate(puts.toArray(new Put[puts.size()]));
+    meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
     meta.close();
     if (meta.getWAL() != null) {
       meta.getWAL().close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index ea704f8..a6f70c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -1060,7 +1060,8 @@ public class RegionSplitter {
           "Could not split region with given user input: " + this);
 
       // remove endpoints, which are included in the splits list
-      return Arrays.copyOfRange(splits, 1, splits.length - 1);
+
+      return splits == null? null: Arrays.copyOfRange(splits, 1, splits.length - 1);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 5241dbe..a8983ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -109,7 +109,7 @@ public class TestRegionObserverScannerOpenHook {
         throws IOException {
       scan.setFilter(new NoDataFilter());
       return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
-        ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
+        ((HStore)store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
index b64a031..025a28d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -76,9 +76,9 @@ public class NoOpScanPolicyObserver extends BaseRegionObserver {
       throws IOException {
     Region r = c.getEnvironment().getRegion();
     return scan.isReversed() ? new ReversedStoreScanner(store,
-        store.getScanInfo(), scan, targetCols, r.getReadpoint(scan
+        store.getScanInfo(), scan, targetCols, r.getReadPoint(scan
             .getIsolationLevel())) : new StoreScanner(store,
-        store.getScanInfo(), scan, targetCols, r.getReadpoint(scan
+        store.getScanInfo(), scan, targetCols, r.getReadPoint(scan
             .getIsolationLevel()));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
index c988761..b4eb798 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
@@ -298,7 +298,7 @@ public class TestCoprocessorScanPolicy {
             newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
             oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
         return new StoreScanner(store, scanInfo, scan, targetCols,
-            ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
+            ((HStore) store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
       } else {
         return s;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cc48e03/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index c33b2c2..c0939f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -1436,7 +1436,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     }
   }
 
-  @Test(timeout=60000)
+  @Test(timeout=180000)
   public void testCheckTableLocks() throws Exception {
     IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(0);
     EnvironmentEdgeManager.injectEdge(edge);


[2/6] hbase git commit: TestStochasticLoadBalancer.testRegionReplicationOnMidClusterSameHosts flaky on trunk

Posted by sy...@apache.org.
TestStochasticLoadBalancer.testRegionReplicationOnMidClusterSameHosts flaky on trunk


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ec408e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ec408e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ec408e2

Branch: refs/heads/hbase-12439
Commit: 9ec408e25b70f4ce586340b9396da67a1e38f6ca
Parents: 9cd4871
Author: stack <st...@apache.org>
Authored: Sat Jan 30 07:51:21 2016 -0400
Committer: stack <st...@apache.org>
Committed: Sat Jan 30 07:51:21 2016 -0400

----------------------------------------------------------------------
 .../hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ec408e2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index 7abbeb4..8300f32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -464,7 +465,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
   }
 
-  @Test (timeout = 800000)
+  @Ignore @Test (timeout = 800000) // Test is flakey. TODO: Fix!
   public void testRegionReplicationOnMidClusterSameHosts() {
     conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
     conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec


[4/6] hbase git commit: HBASE-15190 Monkey dies when running on shared cluster (gives up when can't kill the other fellows processes)

Posted by sy...@apache.org.
HBASE-15190 Monkey dies when running on shared cluster (gives up when can't kill the other fellows processes)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/13a46df1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/13a46df1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/13a46df1

Branch: refs/heads/hbase-12439
Commit: 13a46df1815ed32bc9a2696f19cf620b4ce84bb4
Parents: 9955118
Author: stack <st...@apache.org>
Authored: Sun Jan 31 10:51:41 2016 -0600
Committer: stack <st...@apache.org>
Committed: Sun Jan 31 10:51:41 2016 -0600

----------------------------------------------------------------------
 .../test/java/org/apache/hadoop/hbase/HBaseClusterManager.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/13a46df1/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
index c49ae44..ba6a4a9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
@@ -174,7 +174,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager {
     }
 
     protected String findPidCommand(ServiceType service) {
-      return String.format("ps aux | grep proc_%s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
+      return String.format("ps ux | grep proc_%s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
           service);
     }
 
@@ -269,7 +269,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager {
 
     @Override
     protected String findPidCommand(ServiceType service) {
-      return String.format("ps aux | grep %s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
+      return String.format("ps ux | grep %s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
         service);
     }
   }